From 54cc191759fd4734322ebe5f3fbdb25cc9257d2a Mon Sep 17 00:00:00 2001
From: rcohencyberarmor
Date: Wed, 19 Jul 2023 12:20:25 +0300
Subject: [PATCH 1/8] change whitelist struct

Signed-off-by: rcohencyberarmor
---
 .../C-0001-forbiddencontainerregistries.json | 8 +++-
 controls/C-0002-execintocontainer.json | 8 +++-
 ...C-0004-resourcesmemorylimitandrequest.json | 8 +++-
 ...C-0005-apiserverinsecureportisenabled.json | 7 ++-
 controls/C-0007-datadestruction.json | 10 ++++-
 controls/C-0009-resourcelimits.json | 8 +++-
 ...ationscredentialsinconfigurationfiles.json | 8 +++-
 controls/C-0013-nonrootcontainers.json | 8 +++-
 .../C-0014-accesskubernetesdashboard.json | 8 +++-
 controls/C-0015-listkubernetessecrets.json | 8 +++-
 controls/C-0016-allowprivilegeescalation.json | 8 +++-
 .../C-0017-immutablecontainerfilesystem.json | 8 +++-
 controls/C-0018-configuredreadinessprobe.json | 8 +++-
 controls/C-0020-mountserviceprincipal.json | 8 +++-
 .../C-0021-exposedsensitiveinterfaces.json | 7 ++-
 controls/C-0026-kubernetescronjob.json | 7 ++-
 controls/C-0030-ingressandegressblocked.json | 8 +++-
 controls/C-0031-deletekubernetesevents.json | 8 +++-
 ...0034-automaticmappingofserviceaccount.json | 8 +++-
 controls/C-0035-clusteradminbinding.json | 8 +++-
 ...aliciousadmissioncontrollervalidating.json | 7 ++-
 controls/C-0037-corednspoisoning.json | 8 +++-
 controls/C-0038-hostpidipcprivileges.json | 8 +++-
 ...-maliciousadmissioncontrollermutating.json | 7 ++-
 controls/C-0041-hostnetworkaccess.json | 8 +++-
 ...-0042-sshserverrunninginsidecontainer.json | 8 +++-
 controls/C-0044-containerhostport.json | 8 +++-
 controls/C-0045-writablehostpathmount.json | 8 +++-
 controls/C-0046-insecurecapabilities.json | 8 +++-
 controls/C-0048-hostpathmount.json | 8 +++-
 controls/C-0049-networkmapping.json | 8 +++-
 .../C-0050-resourcescpulimitandrequest.json | 8 +++-
 controls/C-0052-instancemetadataapi.json | 7 ++-
 .../C-0053-accesscontainerserviceaccount.json | 10 ++++-
 .../C-0054-clusterinternalnetworking.json | 8 +++-
 controls/C-0055-linuxhardening.json | 8 +++-
 controls/C-0056-configuredlivenessprobe.json | 8 +++-
 controls/C-0057-privilegedcontainer.json | 8 +++-
 ...mlinkforarbitraryhostfilesystemaccess.json | 7 ++-
 ...ingresssnippetannotationvulnerability.json | 8 +++-
 controls/C-0061-podsindefaultnamespace.json | 8 +++-
 .../C-0062-sudoincontainerentrypoint.json | 8 +++-
 controls/C-0063-portforwardingprivileges.json | 8 +++-
 controls/C-0065-noimpersonation.json | 8 +++-
 .../C-0066-secretetcdencryptionenabled.json | 7 ++-
 controls/C-0067-auditlogsenabled.json | 7 ++-
 controls/C-0068-pspenabled.json | 8 +++-
 ...isableanonymousaccesstokubeletservice.json | 7 ++-
 ...enforcekubeletclienttlsauthentication.json | 9 +++-
 controls/C-0073-nakedpods.json | 8 +++-
 ...C-0074-containersmountingdockersocket.json | 8 +++-
 .../C-0075-imagepullpolicyonlatesttag.json | 8 +++-
 controls/C-0076-labelusageforresources.json | 8 +++-
 controls/C-0077-k8scommonlabelsusage.json | 8 +++-
 .../C-0078-imagesfromallowedregistry.json | 8 +++-
 ...cve20220185linuxkernelcontainerescape.json | 7 ++-
 ...C-0081-cve202224348argocddirtraversal.json | 8 +++-
 ...lnerabilitiesexposedtoexternaltraffic.json | 8 +++-
 ...lnerabilitiesexposedtoexternaltraffic.json | 8 +++-
 ...swithexcessiveamountofvulnerabilities.json | 8 +++-
 ...086-cve20220492cgroupscontainerescape.json | 7 ++-
 ...C-0087-cve202223648containerdfsescape.json | 7 ++-
 controls/C-0088-rbacenabled.json | 7 ++-
 ...ve20223172aggregatedapiserverredirect.json | 7 ++-
 .../C-0090-cve202239328grafanaauthbypass.json | 8 +++-
 ...91-cve202247633kyvernosignaturebypass.json | 8 +++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...ificationfileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...ificationfileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...ificationfileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...ificationfileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...interfacefileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto700ormorerestrictive.json | 7 ++-
 ...datadirectoryownershipissettoetcdetcd.json | 7 ++-
 ...headminconffilepermissionsaresetto600.json | 7 ++-
 ...adminconffileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...dulerconffileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...nagerconffileownershipissettorootroot.json | 7 ++-
 ...ectoryandfileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...netespkikeyfilepermissionsaresetto600.json | 7 ++-
 ...rveranonymousauthargumentissettofalse.json | 7 ++-
 ...iservertokenauthfileparameterisnotset.json | 7 ++-
 ...iserverdenyserviceexternalipsisnotset.json | 7 ++-
 ...clientkeyargumentsaresetasappropriate.json | 7 ++-
 ...teauthorityargumentissetasappropriate.json | 7 ++-
 ...tionmodeargumentisnotsettoalwaysallow.json | 7 ++-
 ...authorizationmodeargumentincludesnode.json | 7 ++-
 ...authorizationmodeargumentincludesrbac.json | 7 ++-
 ...ssioncontrolplugineventratelimitisset.json | 7 ++-
 ...ssioncontrolpluginalwaysadmitisnotset.json | 7 ++-
 ...ioncontrolpluginalwayspullimagesisset.json | 9 +++-
 ...denyissetifpodsecuritypolicyisnotused.json | 7 ++-
 ...ssioncontrolpluginserviceaccountisset.json | 7 ++-
 ...ncontrolpluginnamespacelifecycleisset.json | 7 ++-
 ...sioncontrolpluginnoderestrictionisset.json | 7 ++-
 ...piserversecureportargumentisnotsetto0.json | 7 ++-
 ...piserverprofilingargumentissettofalse.json | 7 ++-
 ...theapiserverauditlogpathargumentisset.json | 7 ++-
 ...axageargumentissetto30orasappropriate.json | 7 ++-
 ...ackupargumentissetto10orasappropriate.json | 7 ++-
 ...sizeargumentissetto100orasappropriate.json | 7 ++-
 ...uesttimeoutargumentissetasappropriate.json | 7 ++-
 ...rviceaccountlookupargumentissettotrue.json | 7 ++-
 ...ountkeyfileargumentissetasappropriate.json | 7 ++-
 ...cdkeyfileargumentsaresetasappropriate.json | 7 ++-
 ...tekeyfileargumentsaresetasappropriate.json | 7 ++-
 ...lientcafileargumentissetasappropriate.json | 7 ++-
 ...retcdcafileargumentissetasappropriate.json | 7 ++-
 ...viderconfigargumentissetasappropriate.json | 7 ++-
 ...onprovidersareappropriatelyconfigured.json | 7 ++-
 ...ymakesuseofstrongcryptographicciphers.json | 7 ++-
 ...gcthresholdargumentissetasappropriate.json | 7 ++-
 ...rmanagerprofilingargumentissettofalse.json | 7 ++-
 ...accountcredentialsargumentissettotrue.json | 9 +++-
 ...vatekeyfileargumentissetasappropriate.json | 7 ++-
 ...rrootcafileargumentissetasappropriate.json | 7 ++-
 ...tservercertificateargumentissettotrue.json | 7 ++-
 ...nagerbindaddressargumentissetto127001.json | 7 ++-
 ...chedulerprofilingargumentissettofalse.json | 7 ++-
 ...dulerbindaddressargumentissetto127001.json | 7 ++-
 ...ndkeyfileargumentsaresetasappropriate.json | 7 ++-
 ...ttheclientcertauthargumentissettotrue.json | 7 ++-
 ...ethattheautotlsargumentisnotsettotrue.json | 7 ++-
 ...erkeyfileargumentsaresetasappropriate.json | 7 ++-
 ...peerclientcertauthargumentissettotrue.json | 11 +++--
 ...tthepeerautotlsargumentisnotsettotrue.json | 7 ++-
 ...iquecertificateauthorityisusedforetcd.json | 7 ++-
 ...nsurethataminimalauditpolicyiscreated.json | 7 ++-
 ...eauditpolicycoverskeysecurityconcerns.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...etservicefileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...eexistsensureownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...beletconffileownershipissettorootroot.json | 7 ++-
 ...rmissionsaresetto600ormorerestrictive.json | 7 ++-
 ...thoritiesfileownershipissettorootroot.json | 7 ++-
 ...epermissionssetto600ormorerestrictive.json | 7 ++-
 ...dvalidatefileownershipissettorootroot.json | 7 ++-
 ...ttheanonymousauthargumentissettofalse.json | 7 ++-
 ...tionmodeargumentisnotsettoalwaysallow.json | 7 ++-
 ...lientcafileargumentissetasappropriate.json | 7 ++-
 ...fythatthereadonlyportargumentissetto0.json | 7 ++-
 ...nectionidletimeoutargumentisnotsetto0.json | 7 ++-
 ...tectkerneldefaultsargumentissettotrue.json | 7 ++-
 ...iptablesutilchainsargumentissettotrue.json | 7 ++-
 ...atthehostnameoverrideargumentisnotset.json | 7 ++-
 ...elwhichensuresappropriateeventcapture.json | 7 ++-
 ...tekeyfileargumentsaresetasappropriate.json | 7 ++-
 ...tecertificatesargumentisnotsettofalse.json | 7 ++-
 ...tservercertificateargumentissettotrue.json | 7 ++-
 ...ymakesuseofstrongcryptographicciphers.json | 7 ++-
 ...usteradminroleisonlyusedwhererequired.json | 7 ++-
 controls/C-0186-minimizeaccesstosecrets.json | 7 ++-
 ...mizewildcarduseinrolesandclusterroles.json | 8 +++-
 .../C-0188-minimizeaccesstocreatepods.json | 7 ++-
 ...aultserviceaccountsarenotactivelyused.json | 7 ++-
 ...unttokensareonlymountedwherenecessary.json | 7 ++-
 ...latepermissionsinthekubernetescluster.json | 7 ++-
 ...neactivepolicycontrolmechanisminplace.json | 7 ++-
 ...izetheadmissionofprivilegedcontainers.json | 7 ++-
 ...shingtosharethehostprocessidnamespace.json | 7 ++-
 ...nerswishingtosharethehostipcnamespace.json | 7 ++-
 ...wishingtosharethehostnetworknamespace.json | 7 ++-
 ...ontainerswithallowprivilegeescalation.json | 7 ++-
 ...-minimizetheadmissionofrootcontainers.json | 7 ++-
 ...nofcontainerswiththenet_rawcapability.json | 7 ++-
 ...sionofcontainerswithaddedcapabilities.json | 7 ++-
 ...nofcontainerswithcapabilitiesassigned.json | 7 ++-
 ...missionofwindowshostprocesscontainers.json | 7 ++-
 ...minimizetheadmissionofhostpathvolumes.json | 7 ++-
 ...dmissionofcontainerswhichusehostports.json | 7 ++-
 ...hatthecniinusesupportsnetworkpolicies.json | 7 ++-
 ...lnamespaceshavenetworkpoliciesdefined.json | 7 ++-
 ...ilesoversecretsasenvironmentvariables.json | 7 ++-
 .../C-0208-considerexternalsecretstorage.json | 7 ++-
 ...dariesbetweenresourcesusingnamespaces.json | 7 ++-
 ...ettodockerdefaultinyourpoddefinitions.json | 7 ++-
 ...ecuritycontexttoyourpodsandcontainers.json | 19 +++++---
 ...12-thedefaultnamespaceshouldnotbeused.json | 7 ++-
 ...izetheadmissionofprivilegedcontainers.json | 7 ++-
 ...shingtosharethehostprocessidnamespace.json | 8 +++-
 ...nerswishingtosharethehostipcnamespace.json | 8 +++-
 ...wishingtosharethehostnetworknamespace.json | 8 +++-
 ...ontainerswithallowprivilegeescalation.json | 8 +++-
 ...-minimizetheadmissionofrootcontainers.json | 8 +++-
 ...sionofcontainerswithaddedcapabilities.json | 8 +++-
 ...nofcontainerswithcapabilitiesassigned.json | 8 +++-
 ...ecrimagescanningorathirdpartyprovider.json | 10 ++++-
 .../C-0222-minimizeuseraccesstoamazonecr.json | 9 +++-
 ...zeclusteraccesstoreadonlyforamazonecr.json | 9 +++-
 ...referusingdedicatedeksserviceaccounts.json | 7 ++-
 ...singacontaineroptimizedoswhenpossible.json | 7 ++-
 ...strictaccesstothecontrolplaneendpoint.json | 7 ++-
 ...ndpointenabledandpublicaccessdisabled.json | 7 ++-
 ...ureclustersarecreatedwithprivatenodes.json | 7 ++-
 ...orkpolicyisenabledandsetasappropriate.json | 7 ++-
 ...httpsloadbalancerswithtlscertificates.json | 7 ++-
 ...forkubernetesorupgradetoawscliv116156.json | 7 ++-
 ...erfargateforrunninguntrustedworkloads.json | 9 +++-
 .../C-0234-considerexternalsecretstorage.json | 7 ++-
 ...spermissionssetto644ormorerestrictive.json | 7 ++-
 controls/C-0236-verifyimagesignature.json | 8 +++-
 controls/C-0237-hasimagesignature.json | 8 +++-
 ...rmissionsaresetto644ormorerestrictive.json | 7 ++-
 ...referusingdedicatedaksserviceaccounts.json | 7 ++-
 ...orkpolicyisenabledandsetasappropriate.json | 9 +++-
 ...seazurerbacforkubernetesauthorization.json | 7 ++-
 .../C-0242-hostilemultitenantworkloads.json | 9 +++-
 ...derimagescanningorathirdpartyprovider.json | 9 +++-
 ...4-ensurekubernetessecretsareencrypted.json | 9 +++-
 ...httpsloadbalancerswithtlscertificates.json | 7 ++-
 .../C-0246-avoiduseofsystemmastersgroup.json | 9 +++-
 ...strictaccesstothecontrolplaneendpoint.json | 9 +++-
 ...ureclustersarecreatedwithprivatenodes.json | 9 +++-
 .../C-0249-restrictuntrustedworkloads.json | 11 +++--
 ...oreadonlyforazurecontainerregistryacr.json | 43 +++++++++++--------
 ...useraccesstoazurecontainerregistryacr.json | 7 ++-
 ...ndpointenabledandpublicaccessdisabled.json | 7 ++-
 controls/C-0253-deprecated-k8s-registry.json | 8 +++-
 controls/C-0254-enableauditlogs.json | 9 +++-
 controls/C-0255-workloadwithsecretaccess.json | 8 +++-
 controls/C-0256-exposuretointernet.json | 8 +++-
 controls/C-0257-pvcaccess.json | 8 +++-
 controls/C-0258-configmapaccess.json | 8 +++-
 .../C-0259-workloadwithcredentialaccess.json | 8 +++-
 controls/C-0260-missingnetworkpolicy.json | 8 +++-
 controls/C-0261-satokenmounted.json | 8 +++-
 controls/C-0262-anonymousaccessisenabled.json | 8 +++-
 go.work.sum | 16 +++----
 scripts/validations.py | 19 ++++++++
 238 files changed, 1555 insertions(+), 292 deletions(-)
 mode change 100755 => 100644 controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json

diff --git a/controls/C-0001-forbiddencontainerregistries.json b/controls/C-0001-forbiddencontainerregistries.json
index 8ef165e2e..b64de070a 100644
--- a/controls/C-0001-forbiddencontainerregistries.json
+++ b/controls/C-0001-forbiddencontainerregistries.json
@@ -28,5 +28,11 @@
 "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.",
 "controlID": "C-0001",
 "baseScore": 7.0,
- "example": "@controls/examples/c001.yaml"
+ "example": "@controls/examples/c001.yaml",
+ "scanningScope": {
+ "matches": [
+ "cluster",
+ "file"
+ ]
+ }
 }
\ No newline at end of file
diff --git a/controls/C-0002-execintocontainer.json b/controls/C-0002-execintocontainer.json
index ddfb27964..f70edfd70 100644
--- a/controls/C-0002-execintocontainer.json
+++ b/controls/C-0002-execintocontainer.json
@@ -21,5 +21,11 @@
 "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.",
 "controlID": "C-0002",
 "baseScore": 5.0,
- "example": "@controls/examples/c002.yaml"
+ "example": "@controls/examples/c002.yaml",
+ "scanningScope": {
+ "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0004-resourcesmemorylimitandrequest.json b/controls/C-0004-resourcesmemorylimitandrequest.json index 3ff49fd30..8f6204ca6 100644 --- a/controls/C-0004-resourcesmemorylimitandrequest.json +++ b/controls/C-0004-resourcesmemorylimitandrequest.json @@ -23,5 +23,11 @@ ], "controlID": "C-0004", "example": "@controls/examples/c004.yaml", - "baseScore": 8.0 + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0005-apiserverinsecureportisenabled.json b/controls/C-0005-apiserverinsecureportisenabled.json index ccf847108..74ed0dbf6 100644 --- a/controls/C-0005-apiserverinsecureportisenabled.json +++ b/controls/C-0005-apiserverinsecureportisenabled.json @@ -23,5 +23,10 @@ "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", "controlID": "C-0005", - "baseScore": 9.0 + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0007-datadestruction.json b/controls/C-0007-datadestruction.json index 0344045b8..f61042820 100644 --- a/controls/C-0007-datadestruction.json +++ b/controls/C-0007-datadestruction.json @@ -19,6 +19,12 @@ "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", "controlID": "C-0007", - "baseScore": 5.0, - "example": "@controls/examples/c007.yaml" + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0009-resourcelimits.json b/controls/C-0009-resourcelimits.json index a7b1671aa..1e775cd75 100644 --- a/controls/C-0009-resourcelimits.json +++ b/controls/C-0009-resourcelimits.json @@ -23,5 +23,11 @@ "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", "controlID": "C-0009", "baseScore": 7.0, - "example": "@controls/examples/c009.yaml" + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0012-applicationscredentialsinconfigurationfiles.json b/controls/C-0012-applicationscredentialsinconfigurationfiles.json index 2514cbe92..2280a264d 100644 --- a/controls/C-0012-applicationscredentialsinconfigurationfiles.json +++ b/controls/C-0012-applicationscredentialsinconfigurationfiles.json @@ -36,5 +36,11 @@ "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. 
Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", "controlID": "C-0012", - "baseScore": 8.0 + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0013-nonrootcontainers.json b/controls/C-0013-nonrootcontainers.json index 29274949c..4c03dff93 100644 --- a/controls/C-0013-nonrootcontainers.json +++ b/controls/C-0013-nonrootcontainers.json @@ -24,5 +24,11 @@ "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", "controlID": "C-0013", "baseScore": 6.0, - "example": "@controls/examples/c013.yaml" + "example": "@controls/examples/c013.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0014-accesskubernetesdashboard.json b/controls/C-0014-accesskubernetesdashboard.json index 6aa8284ea..cb861542a 100644 --- a/controls/C-0014-accesskubernetesdashboard.json +++ b/controls/C-0014-accesskubernetesdashboard.json @@ -21,5 +21,11 @@ "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", "controlID": "C-0014", - "baseScore": 2.0 + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0015-listkubernetessecrets.json b/controls/C-0015-listkubernetessecrets.json index d2013e573..f7f2b3e0e 100644 --- a/controls/C-0015-listkubernetessecrets.json +++ b/controls/C-0015-listkubernetessecrets.json @@ -29,5 +29,11 @@ "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", "controlID": "C-0015", "baseScore": 7.0, - "example": "@controls/examples/c015.yaml" + "example": "@controls/examples/c015.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0016-allowprivilegeescalation.json b/controls/C-0016-allowprivilegeescalation.json index 3b87271b7..b284dd4f4 100644 --- a/controls/C-0016-allowprivilegeescalation.json +++ b/controls/C-0016-allowprivilegeescalation.json @@ -23,5 +23,11 @@ "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
", "controlID": "C-0016", "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml" + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0017-immutablecontainerfilesystem.json b/controls/C-0017-immutablecontainerfilesystem.json index 8f886dfca..46220ebf2 100644 --- a/controls/C-0017-immutablecontainerfilesystem.json +++ b/controls/C-0017-immutablecontainerfilesystem.json @@ -25,5 +25,11 @@ "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", "controlID": "C-0017", "baseScore": 3.0, - "example": "@controls/examples/c017.yaml" + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0018-configuredreadinessprobe.json b/controls/C-0018-configuredreadinessprobe.json index bb4de61dd..ae6d7e12f 100644 --- a/controls/C-0018-configuredreadinessprobe.json +++ b/controls/C-0018-configuredreadinessprobe.json @@ -14,5 +14,11 @@ "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", "controlID": "C-0018", "example": "@controls/examples/c018.yaml", - "baseScore": 3 + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0020-mountserviceprincipal.json b/controls/C-0020-mountserviceprincipal.json index 31d308a09..69dd1925b 100644 --- a/controls/C-0020-mountserviceprincipal.json +++ b/controls/C-0020-mountserviceprincipal.json @@ -18,5 +18,11 @@ "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", "controlID": "C-0020", - "baseScore": 4.0 + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0021-exposedsensitiveinterfaces.json b/controls/C-0021-exposedsensitiveinterfaces.json index 1c29ae771..67d3be8f0 100644 --- a/controls/C-0021-exposedsensitiveinterfaces.json +++ b/controls/C-0021-exposedsensitiveinterfaces.json @@ -19,5 +19,10 @@ "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. 
Needs to add user config", "controlID": "C-0021", - "baseScore": 6.0 + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0026-kubernetescronjob.json b/controls/C-0026-kubernetescronjob.json index 38c4fe702..bc8731ba8 100644 --- a/controls/C-0026-kubernetescronjob.json +++ b/controls/C-0026-kubernetescronjob.json @@ -17,5 +17,10 @@ "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", "test": "We list all CronJobs that exist in cluster for the user to approve.", "controlID": "C-0026", - "baseScore": 1.0 + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0030-ingressandegressblocked.json b/controls/C-0030-ingressandegressblocked.json index 93c081227..4225dad7d 100644 --- a/controls/C-0030-ingressandegressblocked.json +++ b/controls/C-0030-ingressandegressblocked.json @@ -15,5 +15,11 @@ "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", "controlID": "C-0030", "baseScore": 6.0, - "example": "@controls/examples/c030.yaml" + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0031-deletekubernetesevents.json b/controls/C-0031-deletekubernetesevents.json index 761679ded..7f1808e29 100644 --- a/controls/C-0031-deletekubernetesevents.json +++ b/controls/C-0031-deletekubernetesevents.json @@ -29,5 +29,11 @@ "test": "List who has delete/deletecollection RBAC permissions on events.", "controlID": "C-0031", "baseScore": 4.0, - "example": "@controls/examples/c031.yaml" + "example": "@controls/examples/c031.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0034-automaticmappingofserviceaccount.json b/controls/C-0034-automaticmappingofserviceaccount.json index 54cb98f04..45b0265b5 100644 --- a/controls/C-0034-automaticmappingofserviceaccount.json +++ b/controls/C-0034-automaticmappingofserviceaccount.json @@ -25,5 +25,11 @@ "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", "controlID": "C-0034", "baseScore": 6.0, - "example": "@controls/examples/c034.yaml" + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0035-clusteradminbinding.json b/controls/C-0035-clusteradminbinding.json index 61441e542..1f18b104b 100644 --- a/controls/C-0035-clusteradminbinding.json +++ b/controls/C-0035-clusteradminbinding.json @@ -29,5 +29,11 @@ "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. 
Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", "controlID": "C-0035", - "baseScore": 6.0 + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0036-maliciousadmissioncontrollervalidating.json b/controls/C-0036-maliciousadmissioncontrollervalidating.json index 484ead36d..0fc953bb7 100644 --- a/controls/C-0036-maliciousadmissioncontrollervalidating.json +++ b/controls/C-0036-maliciousadmissioncontrollervalidating.json @@ -25,5 +25,10 @@ "list-all-validating-webhooks" ], "controlID": "C-0036", - "baseScore": 3.0 + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0037-corednspoisoning.json b/controls/C-0037-corednspoisoning.json index ca840cc52..7c6028772 100644 --- a/controls/C-0037-corednspoisoning.json +++ b/controls/C-0037-corednspoisoning.json @@ -26,5 +26,11 @@ "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", "controlID": "C-0037", - "baseScore": 4.0 + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0038-hostpidipcprivileges.json b/controls/C-0038-hostpidipcprivileges.json index fb897a3e2..d5b338689 100644 --- a/controls/C-0038-hostpidipcprivileges.json +++ b/controls/C-0038-hostpidipcprivileges.json @@ -23,5 +23,11 @@ "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
This control identifies all PODs using hostPID or hostIPC privileges.", "controlID": "C-0038", "baseScore": 7.0, - "example": "@controls/examples/c038.yaml" + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0039-maliciousadmissioncontrollermutating.json b/controls/C-0039-maliciousadmissioncontrollermutating.json index e947da4de..ed3b23139 100644 --- a/controls/C-0039-maliciousadmissioncontrollermutating.json +++ b/controls/C-0039-maliciousadmissioncontrollermutating.json @@ -24,5 +24,10 @@ "list-all-mutating-webhooks" ], "controlID": "C-0039", - "baseScore": 4.0 + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0041-hostnetworkaccess.json b/controls/C-0041-hostnetworkaccess.json index 10092c3e7..3e0072e82 100644 --- a/controls/C-0041-hostnetworkaccess.json +++ b/controls/C-0041-hostnetworkaccess.json @@ -26,5 +26,11 @@ "test": "", "controlID": "C-0041", "baseScore": 7.0, - "example": "@controls/examples/c041.yaml" + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0042-sshserverrunninginsidecontainer.json b/controls/C-0042-sshserverrunninginsidecontainer.json index 9a1ef0ab9..d2f3e7a23 100644 --- a/controls/C-0042-sshserverrunninginsidecontainer.json +++ b/controls/C-0042-sshserverrunninginsidecontainer.json @@ -18,5 +18,11 @@ "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. 
", "controlID": "C-0042", - "baseScore": 3.0 + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0044-containerhostport.json b/controls/C-0044-containerhostport.json index bd74818c7..15650b77a 100644 --- a/controls/C-0044-containerhostport.json +++ b/controls/C-0044-containerhostport.json @@ -25,5 +25,11 @@ "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", "controlID": "C-0044", "baseScore": 4.0, - "example": "@controls/examples/c044.yaml" + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0045-writablehostpathmount.json b/controls/C-0045-writablehostpathmount.json index 4663cbffb..e392fa0ca 100644 --- a/controls/C-0045-writablehostpathmount.json +++ b/controls/C-0045-writablehostpathmount.json @@ -31,5 +31,11 @@ "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", "controlID": "C-0045", "baseScore": 8.0, - "example": "@controls/examples/c045.yaml" + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0046-insecurecapabilities.json b/controls/C-0046-insecurecapabilities.json index 67f74af50..7afb7cac2 100644 --- a/controls/C-0046-insecurecapabilities.json +++ b/controls/C-0046-insecurecapabilities.json @@ -25,5 +25,11 @@ "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", "controlID": "C-0046", "baseScore": 7.0, - "example": "@controls/examples/c046.yaml" + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0048-hostpathmount.json b/controls/C-0048-hostpathmount.json index acbdb0f89..eb981e92d 100644 --- a/controls/C-0048-hostpathmount.json +++ b/controls/C-0048-hostpathmount.json @@ -25,5 +25,11 @@ "alert-any-hostpath" ], "controlID": "C-0048", - "baseScore": 7.0 + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0049-networkmapping.json b/controls/C-0049-networkmapping.json index 59d93fb62..3f605876e 100644 --- a/controls/C-0049-networkmapping.json +++ b/controls/C-0049-networkmapping.json @@ -27,5 +27,11 @@ "test": "Check for each namespace if there is a network policy defined.", "controlID": "C-0049", "baseScore": 3.0, - "example": "@controls/examples/c049.yaml" + "example": "@controls/examples/c049.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0050-resourcescpulimitandrequest.json b/controls/C-0050-resourcescpulimitandrequest.json index 1867e8cef..697be92d4 100644 --- a/controls/C-0050-resourcescpulimitandrequest.json +++ b/controls/C-0050-resourcescpulimitandrequest.json @@ -14,5 +14,11 @@ "resources-cpu-limit-and-request" ], "controlID": "C-0050", - "baseScore": 8.0 + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0052-instancemetadataapi.json b/controls/C-0052-instancemetadataapi.json index cbec46c46..37cd4dcf5 100644 --- 
a/controls/C-0052-instancemetadataapi.json +++ b/controls/C-0052-instancemetadataapi.json @@ -28,5 +28,10 @@ "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", "controlID": "C-0052", - "baseScore": 7.0 + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0053-accesscontainerserviceaccount.json b/controls/C-0053-accesscontainerserviceaccount.json index bc8e87b03..6bb29a690 100644 --- a/controls/C-0053-accesscontainerserviceaccount.json +++ b/controls/C-0053-accesscontainerserviceaccount.json @@ -29,5 +29,11 @@ "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", "controlID": "C-0053", - "baseScore": 6.0 -} + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } +} \ No newline at end of file diff --git a/controls/C-0054-clusterinternalnetworking.json b/controls/C-0054-clusterinternalnetworking.json index affbd4791..5be7c4845 100644 --- a/controls/C-0054-clusterinternalnetworking.json +++ b/controls/C-0054-clusterinternalnetworking.json @@ -27,5 +27,11 @@ "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", "test": "Check for each namespace if there is a network policy defined.", "controlID": "C-0054", - "baseScore": 4.0 + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0055-linuxhardening.json b/controls/C-0055-linuxhardening.json index eb7b16462..3885822b8 100644 --- a/controls/C-0055-linuxhardening.json +++ b/controls/C-0055-linuxhardening.json @@ -23,5 +23,11 @@ "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
If none of these fields are defined for both the container and pod, alert.", "controlID": "C-0055", - "baseScore": 4.0 + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0056-configuredlivenessprobe.json b/controls/C-0056-configuredlivenessprobe.json index 19a68f45a..db8f7e216 100644 --- a/controls/C-0056-configuredlivenessprobe.json +++ b/controls/C-0056-configuredlivenessprobe.json @@ -13,5 +13,11 @@ ], "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", "controlID": "C-0056", - "baseScore": 4 + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0057-privilegedcontainer.json b/controls/C-0057-privilegedcontainer.json index df30b9532..c75db1f75 100644 --- a/controls/C-0057-privilegedcontainer.json +++ b/controls/C-0057-privilegedcontainer.json @@ -26,5 +26,11 @@ "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", "controlID": "C-0057", - "baseScore": 8.0 + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json b/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json index fb536dcb2..06e1f72cb 100644 --- a/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json +++ b/controls/C-0058-cve202125741usingsymlinkforarbitraryhostfilesystemaccess.json @@ -22,5 +22,10 @@ "Symlink-Exchange-Can-Allow-Host-Filesystem-Access" ], "controlID": "C-0058", - "baseScore": 6.0 + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json b/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json index be5501eea..7c7bd2ee1 100644 --- a/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json +++ b/controls/C-0059-cve202125742nginxingresssnippetannotationvulnerability.json @@ -23,5 +23,11 @@ "nginx-ingress-snippet-annotation-vulnerability" ], "controlID": "C-0059", - "baseScore": 8.0 + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0061-podsindefaultnamespace.json b/controls/C-0061-podsindefaultnamespace.json index 5233973f8..1d4c31b24 100644 --- a/controls/C-0061-podsindefaultnamespace.json +++ b/controls/C-0061-podsindefaultnamespace.json @@ -15,5 +15,11 @@ "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. 
This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the PODs running in the default namespace.", "test": "Check that there are no pods in the 'default' namespace", "controlID": "C-0061", - "baseScore": 3 + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0062-sudoincontainerentrypoint.json b/controls/C-0062-sudoincontainerentrypoint.json index b8346d6d1..d4f6de8e5 100644 --- a/controls/C-0062-sudoincontainerentrypoint.json +++ b/controls/C-0062-sudoincontainerentrypoint.json @@ -23,5 +23,11 @@ "test": "Check that there is no 'sudo' in the container entrypoint", "controlID": "C-0062", "baseScore": 5.0, - "example": "@controls/examples/c062.yaml" + "example": "@controls/examples/c062.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0063-portforwardingprivileges.json b/controls/C-0063-portforwardingprivileges.json index 2e37c4ac0..3104bb0b2 100644 --- a/controls/C-0063-portforwardingprivileges.json +++ b/controls/C-0063-portforwardingprivileges.json @@ -28,5 +28,11 @@ "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", "controlID": "C-0063", "baseScore": 5.0, - "example": "@controls/examples/c063.yaml" + "example": "@controls/examples/c063.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0065-noimpersonation.json b/controls/C-0065-noimpersonation.json index 183b96c64..94433d2d1 100644 --- a/controls/C-0065-noimpersonation.json +++ b/controls/C-0065-noimpersonation.json @@ -26,5 +26,11 @@ ], "controlID": "C-0065", "baseScore": 6.0, - "example": "@controls/examples/c065.yaml" + "example": "@controls/examples/c065.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0066-secretetcdencryptionenabled.json b/controls/C-0066-secretetcdencryptionenabled.json index 45c4ccbf2..a4730b216 100644 --- a/controls/C-0066-secretetcdencryptionenabled.json +++ b/controls/C-0066-secretetcdencryptionenabled.json @@ -24,5 +24,10 @@ "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", "controlID": "C-0066", - "baseScore": 6.0 + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0067-auditlogsenabled.json b/controls/C-0067-auditlogsenabled.json index f79363ddd..ef31f9f78 100644 --- a/controls/C-0067-auditlogsenabled.json +++ b/controls/C-0067-auditlogsenabled.json @@ -24,5 +24,10 @@ "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", "controlID": "C-0067", - "baseScore": 5.0 + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0068-pspenabled.json b/controls/C-0068-pspenabled.json index 107e81181..14ec03b92 100644 --- a/controls/C-0068-pspenabled.json +++ b/controls/C-0068-pspenabled.json @@ -24,5 +24,11 @@ "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive PODs in your cluster.", "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", "controlID": "C-0068", - "baseScore": 1.0 + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0069-disableanonymousaccesstokubeletservice.json b/controls/C-0069-disableanonymousaccesstokubeletservice.json index b082227b7..1a7ba3c3d 100644 --- a/controls/C-0069-disableanonymousaccesstokubeletservice.json +++ b/controls/C-0069-disableanonymousaccesstokubeletservice.json @@ -23,5 +23,10 @@ "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", "controlID": "C-0069", - "baseScore": 10 + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0070-enforcekubeletclienttlsauthentication.json b/controls/C-0070-enforcekubeletclienttlsauthentication.json index 9a70d3746..8f75f136e 100644 --- a/controls/C-0070-enforcekubeletclienttlsauthentication.json +++ b/controls/C-0070-enforcekubeletclienttlsauthentication.json @@ -23,5 +23,10 @@ "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", "controlID": "C-0070", - "baseScore": 9.0 -} + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } +} \ No newline at end of file diff --git a/controls/C-0073-nakedpods.json b/controls/C-0073-nakedpods.json index 4ead9b663..6a82d0082 100644 --- a/controls/C-0073-nakedpods.json +++ b/controls/C-0073-nakedpods.json @@ -14,5 +14,11 @@ "long_description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if PODs may lead to a configuration drifts and other untracked changes in the system. 
Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have corresponding parental object.", "test": "Test if PODs are not associated with Deployment, ReplicaSet etc. If not, fail.", "controlID": "C-0073", - "baseScore": 3 + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0074-containersmountingdockersocket.json b/controls/C-0074-containersmountingdockersocket.json index f0e2ca8ef..5b6ac203a 100644 --- a/controls/C-0074-containersmountingdockersocket.json +++ b/controls/C-0074-containersmountingdockersocket.json @@ -14,5 +14,11 @@ "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", "test": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", "controlID": "C-0074", - "baseScore": 5 + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0075-imagepullpolicyonlatesttag.json b/controls/C-0075-imagepullpolicyonlatesttag.json index d932795ca..a0dc7f74b 100644 --- a/controls/C-0075-imagepullpolicyonlatesttag.json +++ b/controls/C-0075-imagepullpolicyonlatesttag.json @@ -14,5 +14,11 @@ "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all PODs with latest tag that have ImagePullSecret not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latests too.", "test": "If imagePullPolicy = always pass, else fail.", "controlID": "C-0075", - "baseScore": 2 + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0076-labelusageforresources.json b/controls/C-0076-labelusageforresources.json index 472286603..e3c42c1b5 100644 --- a/controls/C-0076-labelusageforresources.json +++ b/controls/C-0076-labelusageforresources.json @@ -15,5 +15,11 @@ "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", "test": "Test will check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", "controlID": "C-0076", - "baseScore": 2 + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0077-k8scommonlabelsusage.json b/controls/C-0077-k8scommonlabelsusage.json index 1726e62ae..b785de785 100644 --- a/controls/C-0077-k8scommonlabelsusage.json +++ b/controls/C-0077-k8scommonlabelsusage.json @@ -15,5 +15,11 @@ "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", "controlID": "C-0077", - "baseScore": 2.0 + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0078-imagesfromallowedregistry.json b/controls/C-0078-imagesfromallowedregistry.json index fe6e3497d..45839c096 100644 --- a/controls/C-0078-imagesfromallowedregistry.json +++ b/controls/C-0078-imagesfromallowedregistry.json @@ -28,5 +28,11 @@ "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", "test": "Checks if image is from allowed listed registry.", "controlID": "C-0078", - "baseScore": 5.0 + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0079-cve20220185linuxkernelcontainerescape.json b/controls/C-0079-cve20220185linuxkernelcontainerescape.json index b13c9d924..8ee6db6f6 100644 --- a/controls/C-0079-cve20220185linuxkernelcontainerescape.json +++ b/controls/C-0079-cve20220185linuxkernelcontainerescape.json @@ -24,5 +24,10 @@ "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", "controlID": "C-0079", "baseScore": 4.0, - "example": "" + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0081-cve202224348argocddirtraversal.json b/controls/C-0081-cve202224348argocddirtraversal.json index acc49d027..172993781 100644 --- a/controls/C-0081-cve202224348argocddirtraversal.json +++ b/controls/C-0081-cve202224348argocddirtraversal.json @@ -23,5 +23,11 @@ "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", "controlID": "C-0081", "baseScore": 4.0, - "example": "" + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json b/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json index 1d3865a44..1fd491486 100644 --- a/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json +++ b/controls/C-0083-workloadswithcriticalvulnerabilitiesexposedtoexternaltraffic.json @@ -24,5 +24,11 @@ "test": "This control 
enumerates external facing workloads, that have LoadBalancer or NodePort services and checks image vulnerability information to see if the image has critical vulnerabilities.", "controlID": "C-0083", "baseScore": 8.0, - "example": "@controls/examples/c83.yaml" + "example": "@controls/examples/c83.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json b/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json index 716bdb8a0..40261f934 100644 --- a/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json +++ b/controls/C-0084-workloadswithrcevulnerabilitiesexposedtoexternaltraffic.json @@ -25,5 +25,11 @@ "test": "This control enumerates external facing workloads, that have LoadBalancer or NodePort service and checks the image vulnerability information for the RCE vulnerability.", "controlID": "C-0084", "baseScore": 8.0, - "example": "@controls/examples/c84.yaml" + "example": "@controls/examples/c84.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json b/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json index 21bff4fb5..c15b725c1 100644 --- a/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json +++ b/controls/C-0085-workloadswithexcessiveamountofvulnerabilities.json @@ -26,5 +26,11 @@ "test": "This control enumerates workloads and checks if they have excessive amount of vulnerabilities in their container images. The threshold of \u201cexcessive number\u201d is configurable.", "controlID": "C-0085", "baseScore": 6.0, - "example": "@controls/examples/c85.yaml" + "example": "@controls/examples/c85.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0086-cve20220492cgroupscontainerescape.json b/controls/C-0086-cve20220492cgroupscontainerescape.json index 87adce633..e81555b96 100644 --- a/controls/C-0086-cve20220492cgroupscontainerescape.json +++ b/controls/C-0086-cve20220492cgroupscontainerescape.json @@ -24,5 +24,10 @@ "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. 
In cases where the container is running with the CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it is enough to prevent exploitation in this case.", "controlID": "C-0086", "baseScore": 4.0, - "example": "" + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0087-cve202223648containerdfsescape.json b/controls/C-0087-cve202223648containerdfsescape.json index 3afc61495..48dba9cc1 100644 --- a/controls/C-0087-cve202223648containerdfsescape.json +++ b/controls/C-0087-cve202223648containerdfsescape.json @@ -24,5 +24,10 @@ "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", "controlID": "C-0087", "baseScore": 7.0, - "example": "" + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0088-rbacenabled.json b/controls/C-0088-rbacenabled.json index f7de64162..a02d2eb8b 100644 --- a/controls/C-0088-rbacenabled.json +++ b/controls/C-0088-rbacenabled.json @@ -25,5 +25,10 @@ "long_description": "RBAC is the most advanced and well-accepted mode of authorizing users of the Kubernetes API", "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", "controlID": "C-0088", - "baseScore": 7.0 + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0089-cve20223172aggregatedapiserverredirect.json b/controls/C-0089-cve20223172aggregatedapiserverredirect.json index 55ee727bc..203659957 100644 --- a/controls/C-0089-cve20223172aggregatedapiserverredirect.json +++ b/controls/C-0089-cve20223172aggregatedapiserverredirect.json @@ -16,5 +16,10 @@ "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", "controlID": "C-0089", "baseScore": 3.0, - "example": "" + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0090-cve202239328grafanaauthbypass.json b/controls/C-0090-cve202239328grafanaauthbypass.json index a1473241c..5a4135a6b 100644 --- a/controls/C-0090-cve202239328grafanaauthbypass.json +++ b/controls/C-0090-cve202239328grafanaauthbypass.json @@ -24,5 +24,11 @@ "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", "controlID": "C-0090", "baseScore": 9.0, - "example": "" + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0091-cve202247633kyvernosignaturebypass.json b/controls/C-0091-cve202247633kyvernosignaturebypass.json index 60ef7dd8b..4ef15be82 100644 --- a/controls/C-0091-cve202247633kyvernosignaturebypass.json +++ b/controls/C-0091-cve202247633kyvernosignaturebypass.json @@ -24,5 +24,11 @@ "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", "controlID": "C-0091", "baseScore": 8.0, - "example": "" + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json index 29c505749..91f27adc5 100644 ---
a/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0092-ensurethattheapiserverpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`." + "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json b/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json index 037c1d45b..8e9c60afa 100644 --- a/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0093-ensurethattheapiserverpodspecificationfileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`." + "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json index 5262f94db..64cc28b75 100644 --- a/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0094-ensurethatthecontrollermanagerpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`." + "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json b/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json index 0fe521751..71c988bce 100644 --- a/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0095-ensurethatthecontrollermanagerpodspecificationfileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`." 
+ "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json index 1e42e9d83..b4576ff90 100644 --- a/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0096-ensurethattheschedulerpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`." + "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json b/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json index 78968d9d6..bdf5b6ea9 100644 --- a/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0097-ensurethattheschedulerpodspecificationfileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`." + "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json index d24d46f3e..29ccd4994 100644 --- a/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0098-ensurethattheetcdpodspecificationfilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`." + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json b/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json index 54816d4f9..76f891149 100644 --- a/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json +++ b/controls/C-0099-ensurethattheetcdpodspecificationfileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`." 
+ "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json index fcd1438cc..2a7e634b8 100644 --- a/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0100-ensurethatthecontainernetworkinterfacefilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "NA" + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json b/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json index b3aefb8d6..f6d363fc2 100644 --- a/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json +++ b/controls/C-0101-ensurethatthecontainernetworkinterfacefileownershipissettorootroot.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "NA" + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json b/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json index 497a65361..14044b482 100644 --- a/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json +++ b/controls/C-0102-ensurethattheetcddatadirectorypermissionsaresetto700ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 7, "impact_statement": "None", - "default_value": "By default, etcd data directory has permissions of `755`." + "default_value": "By default, etcd data directory has permissions of `755`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json b/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json index bbade40f5..ce3f94e86 100644 --- a/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json +++ b/controls/C-0103-ensurethattheetcddatadirectoryownershipissettoetcdetcd.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "None", - "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`." + "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json b/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json index b9feeccda..9c441fec6 100644 --- a/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json +++ b/controls/C-0104-ensurethattheadminconffilepermissionsaresetto600.json @@ -16,5 +16,10 @@ }, "baseScore": 7, "impact_statement": "None.", - "default_value": "By default, admin.conf has permissions of `600`." 
+ "default_value": "By default, admin.conf has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json b/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json index 0196d372a..e2dfcaa6a 100644 --- a/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json +++ b/controls/C-0105-ensurethattheadminconffileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 7, "impact_statement": "None.", - "default_value": "By default, `admin.conf` file ownership is set to `root:root`." + "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json index 5163d0d6c..9f5648027 100644 --- a/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0106-ensurethattheschedulerconffilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `scheduler.conf` has permissions of `640`." + "default_value": "By default, `scheduler.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json b/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json index f916fea2e..58b4a19e1 100644 --- a/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json +++ b/controls/C-0107-ensurethattheschedulerconffileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`." + "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json index af87f8a4f..6e192c44f 100644 --- a/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0108-ensurethatthecontrollermanagerconffilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` has permissions of `640`." 
+ "default_value": "By default, `controller-manager.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json b/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json index db995d88a..dc9b29da3 100644 --- a/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json +++ b/controls/C-0109-ensurethatthecontrollermanagerconffileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`." + "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json b/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json index 2a0c384da..e687d668e 100644 --- a/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json +++ b/controls/C-0110-ensurethatthekubernetespkidirectoryandfileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 8, "impact_statement": "None", - "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user." + "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json index efa3e7de8..2b7e5d090 100644 --- a/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0111-ensurethatthekubernetespkicertificatefilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 8, "impact_statement": "None", - "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`" + "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json b/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json index 9fdd0a8c9..c36909123 100644 --- a/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json +++ b/controls/C-0112-ensurethatthekubernetespkikeyfilepermissionsaresetto600.json @@ -16,5 +16,10 @@ }, "baseScore": 8, "impact_statement": "None", - "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`" + "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json b/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json index f92a9c7ca..2daf3acf3 100644 
--- a/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json +++ b/controls/C-0113-ensurethattheapiserveranonymousauthargumentissettofalse.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled." + "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json b/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json index 41b561dea..2ce01e759 100644 --- a/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json +++ b/controls/C-0114-ensurethattheapiservertokenauthfileparameterisnotset.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", - "default_value": "By default, `--token-auth-file` argument is not set." + "default_value": "By default, `--token-auth-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json b/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json index b62a9c2a4..fee0e6c2f 100644 --- a/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json +++ b/controls/C-0115-ensurethattheapiserverdenyserviceexternalipsisnotset.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", - "default_value": "By default, `--token-auth-file` argument is not set." + "default_value": "By default, `--token-auth-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json b/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json index 2b59db188..0d2f637a0 100644 --- a/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json +++ b/controls/C-0116-ensurethattheapiserverkubeletclientcertificateandkubeletclientkeyargumentsaresetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, certificate-based kubelet authentication is not set." 
+ "default_value": "By default, certificate-based kubelet authentication is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json b/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json index 800c407fa..3859dd9d6 100644 --- a/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json +++ b/controls/C-0117-ensurethattheapiserverkubeletcertificateauthorityargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--kubelet-certificate-authority` argument is not set." + "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json b/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json index fe54c562a..1b9f8a438 100644 --- a/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json +++ b/controls/C-0118-ensurethattheapiserverauthorizationmodeargumentisnotsettoalwaysallow.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "Only authorized requests will be served.", - "default_value": "By default, `AlwaysAllow` is not enabled." + "default_value": "By default, `AlwaysAllow` is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json b/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json index d7979cd8f..4be1b2b53 100644 --- a/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json +++ b/controls/C-0119-ensurethattheapiserverauthorizationmodeargumentincludesnode.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "None", - "default_value": "By default, `Node` authorization is not enabled." + "default_value": "By default, `Node` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json b/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json index 61b308466..339d28c56 100644 --- a/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json +++ b/controls/C-0120-ensurethattheapiserverauthorizationmodeargumentincludesrbac.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", - "default_value": "By default, `RBAC` authorization is not enabled." 
+ "default_value": "By default, `RBAC` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json b/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json index 3c16cb29b..635eb95a1 100644 --- a/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json +++ b/controls/C-0121-ensurethattheadmissioncontrolplugineventratelimitisset.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "You need to carefully tune in limits as per your environment.", - "default_value": "By default, `EventRateLimit` is not set." + "default_value": "By default, `EventRateLimit` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json b/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json index 691cd0478..c7dfd4ba8 100644 --- a/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json +++ b/controls/C-0122-ensurethattheadmissioncontrolpluginalwaysadmitisnotset.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", - "default_value": "`AlwaysAdmit` is not in the list of default admission plugins." + "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json b/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json index 5d6c3d5c1..d3dad3695 100644 --- a/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json +++ b/controls/C-0123-ensurethattheadmissioncontrolpluginalwayspullimagesisset.json @@ -15,6 +15,11 @@ "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set" ], "baseScore": 4, - "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", - "default_value": "By default, `AlwaysPullImages` is not set." + "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.", + "default_value": "By default, `AlwaysPullImages` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json b/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json index 39ee86e50..684094226 100644 --- a/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json +++ b/controls/C-0124-ensurethattheadmissioncontrolpluginsecuritycontextdenyissetifpodsecuritypolicyisnotused.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", - "default_value": "By default, `SecurityContextDeny` is not set." + "default_value": "By default, `SecurityContextDeny` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json b/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json index 9787d8963..4cc686bf9 100644 --- a/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json +++ b/controls/C-0125-ensurethattheadmissioncontrolpluginserviceaccountisset.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "None.", - "default_value": "By default, `ServiceAccount` is set." + "default_value": "By default, `ServiceAccount` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json b/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json index 8f988da56..1766652f8 100644 --- a/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json +++ b/controls/C-0126-ensurethattheadmissioncontrolpluginnamespacelifecycleisset.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "None", - "default_value": "By default, `NamespaceLifecycle` is set." + "default_value": "By default, `NamespaceLifecycle` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json b/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json index 15e846b00..83c366142 100644 --- a/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json +++ b/controls/C-0127-ensurethattheadmissioncontrolpluginnoderestrictionisset.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "By default, `NodeRestriction` is not set." 
+ "default_value": "By default, `NodeRestriction` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json b/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json index 5fedbcf2b..2881d635a 100644 --- a/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json +++ b/controls/C-0128-ensurethattheapiserversecureportargumentisnotsetto0.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "You need to set the API Server up with the right TLS certificates.", - "default_value": "By default, port 6443 is used as the secure port." + "default_value": "By default, port 6443 is used as the secure port.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json b/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json index 4273db688..d17ef42c1 100644 --- a/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json +++ b/controls/C-0129-ensurethattheapiserverprofilingargumentissettofalse.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled." + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json b/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json index a40c906e0..1a3b66214 100644 --- a/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json +++ b/controls/C-0130-ensurethattheapiserverauditlogpathargumentisset.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "None", - "default_value": "By default, auditing is not enabled." + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json b/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json index 7c7e13a24..44879a754 100644 --- a/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json +++ b/controls/C-0131-ensurethattheapiserverauditlogmaxageargumentissetto30orasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "By default, auditing is not enabled." + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json b/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json index ff49b3837..0a8cacf26 100644 --- a/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json +++ b/controls/C-0132-ensurethattheapiserverauditlogmaxbackupargumentissetto10orasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "By default, auditing is not enabled." 
+ "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json b/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json index 0f72ae0e8..227da02f9 100644 --- a/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json +++ b/controls/C-0133-ensurethattheapiserverauditlogmaxsizeargumentissetto100orasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "By default, auditing is not enabled." + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json b/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json index 102cd3048..17618be9d 100644 --- a/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json +++ b/controls/C-0134-ensurethattheapiserverrequesttimeoutargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "By default, `--request-timeout` is set to 60 seconds." + "default_value": "By default, `--request-timeout` is set to 60 seconds.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json b/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json index 512dc2c50..4e0e142de 100644 --- a/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json +++ b/controls/C-0135-ensurethattheapiserverserviceaccountlookupargumentissettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `--service-account-lookup` argument is set to `true`." + "default_value": "By default, `--service-account-lookup` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json b/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json index 86e12f6e2..ad1de39a9 100644 --- a/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json +++ b/controls/C-0136-ensurethattheapiserverserviceaccountkeyfileargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-key-file` argument is not set." 
+ "default_value": "By default, `--service-account-key-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json b/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json index 8dd8fbfd9..d5865e2d9 100644 --- a/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0137-ensurethattheapiserveretcdcertfileandetcdkeyfileargumentsaresetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set" + "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json b/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json index 0ff4b2f10..e94570e51 100644 --- a/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json +++ b/controls/C-0138-ensurethattheapiservertlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set." + "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json b/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json index 61b2ad914..261ce7686 100644 --- a/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json +++ b/controls/C-0139-ensurethattheapiserverclientcafileargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--client-ca-file` argument is not set." + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json b/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json index 5e3dd28c0..041c9828f 100644 --- a/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json +++ b/controls/C-0140-ensurethattheapiserveretcdcafileargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-cafile` is not set." 
+ "default_value": "By default, `--etcd-cafile` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json b/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json index 5148b3b4b..29ccc962b 100644 --- a/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json +++ b/controls/C-0141-ensurethattheapiserverencryptionproviderconfigargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "None", - "default_value": "By default, `--encryption-provider-config` is not set." + "default_value": "By default, `--encryption-provider-config` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json b/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json old mode 100755 new mode 100644 index f5b7f3161..489cbf68d --- a/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json +++ b/controls/C-0142-ensurethatencryptionprovidersareappropriatelyconfigured.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "None", - "default_value": "By default, no encryption provider is set." + "default_value": "By default, no encryption provider is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json b/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json index 05d130de9..433741857 100644 --- a/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json +++ b/controls/C-0143-ensurethattheapiserveronlymakesuseofstrongcryptographicciphers.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers" + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json b/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json index 02deb93a9..b40756810 100644 --- a/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json +++ b/controls/C-0144-ensurethatthecontrollermanagerterminatedpodgcthresholdargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`." 
+ "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json b/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json index d131c166e..b2aab0b60 100644 --- a/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json +++ b/controls/C-0145-ensurethatthecontrollermanagerprofilingargumentissettofalse.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled." + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json b/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json index 201741a3b..01d0c40a7 100644 --- a/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json +++ b/controls/C-0146-ensurethatthecontrollermanageruseserviceaccountcredentialsargumentissettotrue.json @@ -15,6 +15,11 @@ "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true" ], "baseScore": 4, - "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.", - "default_value": "By default, `--use-service-account-credentials` is set to false." + "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.", + "default_value": "By default, `--use-service-account-credentials` is set to false.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json b/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json index 34391e981..aa89f815d 100644 --- a/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json +++ b/controls/C-0147-ensurethatthecontrollermanagerserviceaccountprivatekeyfileargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-private-key-file` it not set." + "default_value": "By default, `--service-account-private-key-file` it not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json b/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json index 732efea67..300bf74ca 100644 --- a/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json +++ b/controls/C-0148-ensurethatthecontrollermanagerrootcafileargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "You need to setup and maintain root certificate authority file.", - "default_value": "By default, `--root-ca-file` is not set." + "default_value": "By default, `--root-ca-file` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json b/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json index 1857ebfae..6c8bc9480 100644 --- a/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json +++ b/controls/C-0149-ensurethatthecontrollermanagerrotatekubeletservercertificateargumentissettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled." 
+ "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json b/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json index 7a945a54d..a8662bbea 100644 --- a/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json +++ b/controls/C-0150-ensurethatthecontrollermanagerbindaddressargumentissetto127001.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0" + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json b/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json index 130765e2f..5e7ac8f70 100644 --- a/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json +++ b/controls/C-0151-ensurethattheschedulerprofilingargumentissettofalse.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled." + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json b/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json index e0307b832..454948197 100644 --- a/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json +++ b/controls/C-0152-ensurethattheschedulerbindaddressargumentissetto127001.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0" + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json b/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json index 8660ac500..863bedcc0 100644 --- a/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0153-ensurethatthecertfileandkeyfileargumentsaresetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "Client connections only over TLS would be served.", - "default_value": "By default, TLS encryption is not set." + "default_value": "By default, TLS encryption is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json b/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json index e7293c739..f2c8e7f29 100644 --- a/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json +++ b/controls/C-0154-ensurethattheclientcertauthargumentissettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", - "default_value": "By default, the etcd service can be queried by unauthenticated clients." 
+ "default_value": "By default, the etcd service can be queried by unauthenticated clients.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json b/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json index 1deb05e24..4f9cca9c9 100644 --- a/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json +++ b/controls/C-0155-ensurethattheautotlsargumentisnotsettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", - "default_value": "By default, `--auto-tls` is set to `false`." + "default_value": "By default, `--auto-tls` is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json b/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json index 9d3e870e2..ffbc1e555 100644 --- a/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json +++ b/controls/C-0156-ensurethatthepeercertfileandpeerkeyfileargumentsaresetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured." + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json b/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json index 9bd42cf2b..36af3a308 100644 --- a/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json +++ b/controls/C-0157-ensurethatthepeerclientcertauthargumentissettotrue.json @@ -3,8 +3,8 @@ "name": "Ensure that the --peer-client-cert-auth argument is set to true", "description": "etcd should be configured for peer authentication.", "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-client-cert-auth` argument is set to `true`.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. 
```--peer-client-cert-auth=true```", + "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", "references": [ "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" ], @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`." + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json b/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json index f42310f52..bca0fd59c 100644 --- a/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json +++ b/controls/C-0158-ensurethatthepeerautotlsargumentisnotsettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`." + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json b/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json index 33930b336..d8da5abc9 100644 --- a/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json +++ b/controls/C-0159-ensurethatauniquecertificateauthorityisusedforetcd.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", - "default_value": "By default, no etcd certificate is created and used." + "default_value": "By default, no etcd certificate is created and used.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0160-ensurethataminimalauditpolicyiscreated.json b/controls/C-0160-ensurethataminimalauditpolicyiscreated.json index ea5c87132..7bfe4b5d9 100644 --- a/controls/C-0160-ensurethataminimalauditpolicyiscreated.json +++ b/controls/C-0160-ensurethataminimalauditpolicyiscreated.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. 
Care should be taken to avoid generating too large volumes of log information as this could impact the available of the cluster nodes.", - "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out." + "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json b/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json index c3e5561c3..f597659bd 100644 --- a/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json +++ b/controls/C-0161-ensurethattheauditpolicycoverskeysecurityconcerns.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", - "default_value": "By default Kubernetes clusters do not log audit information." + "default_value": "By default Kubernetes clusters do not log audit information.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json b/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json index dad6b4b7a..70be494e0 100644 --- a/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0162-ensurethatthekubeletservicefilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "By default, the `kubelet` service file has permissions of `640`." + "default_value": "By default, the `kubelet` service file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json b/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json index dc6dd7929..694577aaf 100644 --- a/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json +++ b/controls/C-0163-ensurethatthekubeletservicefileownershipissettorootroot.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `kubelet` service file ownership is set to `root:root`." + "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json b/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json index 0b3b06476..b28c60793 100644 --- a/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0164-ifproxykubeconfigfileexistsensurepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, proxy file has permissions of `640`." 
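For orientation on what C-0160 and C-0161 above are checking, a minimal audit policy file (the file passed to the API server via `--audit-policy-file`) might look like the sketch below. This is illustrative only and not part of this patch; the rule choices are assumptions and would need tuning per cluster. Secrets and configmaps are deliberately logged at `Metadata` level so that their values never land in the audit log.

```json
{
  "apiVersion": "audit.k8s.io/v1",
  "kind": "Policy",
  "omitStages": ["RequestReceived"],
  "rules": [
    { "level": "Metadata", "resources": [{ "group": "", "resources": ["secrets", "configmaps"] }] },
    { "level": "RequestResponse", "resources": [{ "group": "rbac.authorization.k8s.io", "resources": ["clusterrolebindings", "rolebindings"] }] },
    { "level": "Metadata" }
  ]
}
```

The policy file is parsed as YAML, of which JSON is a valid subset, so this form can be passed to the API server as-is.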
+ "default_value": "By default, proxy file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json b/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json index d07a91bec..3c9299768 100644 --- a/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json +++ b/controls/C-0165-ifproxykubeconfigfileexistsensureownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `proxy` file ownership is set to `root:root`." + "default_value": "By default, `proxy` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json b/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json index 3bf43c320..e2eecd7fb 100644 --- a/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0166-ensurethatthekubeconfigkubeletconffilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file has permissions of `600`." + "default_value": "By default, `kubelet.conf` file has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json b/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json index d33811b28..231d75b9e 100644 --- a/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json +++ b/controls/C-0167-ensurethatthekubeconfigkubeletconffileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 6, "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`." + "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json b/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json index a63d14440..7405e5f67 100644 --- a/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json +++ b/controls/C-0168-ensurethatthecertificateauthoritiesfilepermissionsaresetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 7, "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified." 
+ "default_value": "By default no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json b/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json index 1871fadbd..118cea793 100644 --- a/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json +++ b/controls/C-0169-ensurethattheclientcertificateauthoritiesfileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 7, "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified." + "default_value": "By default no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json b/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json index 4b7a4aaf8..65e6df288 100644 --- a/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json +++ b/controls/C-0170-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatepermissionssetto600ormorerestrictive.json @@ -16,5 +16,10 @@ }, "baseScore": 7, "impact_statement": "None", - "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600." + "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json b/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json index a28421267..6643a4567 100644 --- a/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json +++ b/controls/C-0171-ifthekubeletconfigyamlconfigurationfileisbeingusedvalidatefileownershipissettorootroot.json @@ -16,5 +16,10 @@ }, "baseScore": 7, "impact_statement": "None", - "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`." + "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json b/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json index 36fdc47c8..afb73a3d1 100644 --- a/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json +++ b/controls/C-0172-ensurethattheanonymousauthargumentissettofalse.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled." 
+ "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json b/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json index dafe7c479..95d819b14 100644 --- a/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json +++ b/controls/C-0173-ensurethattheauthorizationmodeargumentisnotsettoalwaysallow.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Unauthorized requests will be denied.", - "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`." + "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json b/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json index a53f72938..b54f4ef0c 100644 --- a/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json +++ b/controls/C-0174-ensurethattheclientcafileargumentissetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--client-ca-file` argument is not set." + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0175-verifythatthereadonlyportargumentissetto0.json b/controls/C-0175-verifythatthereadonlyportargumentissetto0.json index 36df8f3ea..00fbbe55c 100644 --- a/controls/C-0175-verifythatthereadonlyportargumentissetto0.json +++ b/controls/C-0175-verifythatthereadonlyportargumentissetto0.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0." + "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json b/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json index 2565a0401..faa9a0e53 100644 --- a/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json +++ b/controls/C-0176-ensurethatthestreamingconnectionidletimeoutargumentisnotsetto0.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours." 
+ "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json b/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json index 38163513b..60df7915c 100644 --- a/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json +++ b/controls/C-0177-ensurethattheprotectkerneldefaultsargumentissettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 2, "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "By default, `--protect-kernel-defaults` is not set." + "default_value": "By default, `--protect-kernel-defaults` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json b/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json index c6d33426b..9c78504e7 100644 --- a/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json +++ b/controls/C-0178-ensurethatthemakeiptablesutilchainsargumentissettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`." + "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json b/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json index 1251bbca8..4f9a9dce1 100644 --- a/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json +++ b/controls/C-0179-ensurethatthehostnameoverrideargumentisnotset.json @@ -16,5 +16,10 @@ ], "baseScore": 3, "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", - "default_value": "By default, `--hostname-override` argument is not set." + "default_value": "By default, `--hostname-override` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json b/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json index 7725b08a3..a8b72b464 100644 --- a/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json +++ b/controls/C-0180-ensurethattheeventqpsargumentissetto0oralevelwhichensuresappropriateeventcapture.json @@ -16,5 +16,10 @@ ], "baseScore": 2, "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "By default, `--event-qps` argument is set to `5`." 
+ "default_value": "By default, `--event-qps` argument is set to `5`.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json b/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json index 00e21479d..d6b510a03 100644 --- a/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json +++ b/controls/C-0181-ensurethatthetlscertfileandtlsprivatekeyfileargumentsaresetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json b/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json index c029c9820..052d6b729 100644 --- a/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json +++ b/controls/C-0182-ensurethattherotatecertificatesargumentisnotsettofalse.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "By default, kubelet client certificate rotation is enabled." + "default_value": "By default, kubelet client certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json b/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json index f9ed078ce..e499f14a1 100644 --- a/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json +++ b/controls/C-0183-verifythattherotatekubeletservercertificateargumentissettotrue.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None", - "default_value": "By default, kubelet server certificate rotation is enabled." 
+ "default_value": "By default, kubelet server certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json b/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json index 3638f1bba..c466106ea 100644 --- a/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json +++ b/controls/C-0184-ensurethatthekubeletonlymakesuseofstrongcryptographicciphers.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers" + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json b/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json index f9e375f5b..9e489d508 100644 --- a/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json +++ b/controls/C-0185-ensurethattheclusteradminroleisonlyusedwhererequired.json @@ -17,5 +17,10 @@ ], "baseScore": 8, "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal." 
+ "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0186-minimizeaccesstosecrets.json b/controls/C-0186-minimizeaccesstosecrets.json index 91e986267..96bffaf00 100644 --- a/controls/C-0186-minimizeaccesstosecrets.json +++ b/controls/C-0186-minimizeaccesstosecrets.json @@ -17,5 +17,10 @@ ], "baseScore": 6, "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```" + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json index cf87adde5..4024017ec 100644 --- a/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json +++ b/controls/C-0187-minimizewildcarduseinrolesandclusterroles.json @@ -17,5 +17,11 @@ ], "baseScore": 7, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0188-minimizeaccesstocreatepods.json b/controls/C-0188-minimizeaccesstocreatepods.json index e1180d360..54a69ac15 100644 --- a/controls/C-0188-minimizeaccesstocreatepods.json +++ b/controls/C-0188-minimizeaccesstocreatepods.json @@ -17,5 +17,10 @@ ], "baseScore": 5, "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount 
kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```" + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json index 47a5118cf..30360224c 100644 --- a/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json +++ b/controls/C-0189-ensurethatdefaultserviceaccountsarenotactivelyused.json @@ -18,5 +18,10 @@ ], "baseScore": 5, "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace." + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json index e09f66ea9..981e6b763 100644 --- a/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json +++ b/controls/C-0190-ensurethatserviceaccounttokensareonlymountedwherenecessary.json @@ -17,5 +17,10 @@ ], "baseScore": 5, "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them." 
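As an illustration of C-0189 and C-0190 above, automounting of service account tokens can be switched off on the account itself and re-enabled only for pods that genuinely need API access. A minimal sketch, with an assumed account name and namespace:

```json
{
  "apiVersion": "v1",
  "kind": "ServiceAccount",
  "metadata": { "name": "app-sa", "namespace": "payments" },
  "automountServiceAccountToken": false
}
```

Individual pods can override this with `spec.automountServiceAccountToken: true` where they actually need to talk to the API server.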
+ "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json index 8e6c0a96b..4dd40767b 100644 --- a/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json +++ b/controls/C-0191-limituseofthebindimpersonateandescalatepermissionsinthekubernetescluster.json @@ -17,5 +17,10 @@ ], "baseScore": 6, "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate." + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json index 094941636..ccbe332e9 100644 --- a/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json +++ b/controls/C-0192-ensurethattheclusterhasatleastoneactivepolicycontrolmechanisminplace.json @@ -17,5 +17,10 @@ ], "baseScore": 4, "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", - "default_value": "By default, Pod Security Admission is enabled but no policies are in place." + "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json index d39da0d2c..3a2c53907 100644 --- a/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json +++ b/controls/C-0193-minimizetheadmissionofprivilegedcontainers.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of privileged containers." 
+ "default_value": "By default, there are no restrictions on the creation of privileged containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json index 17485c735..c60532eb7 100644 --- a/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json +++ b/controls/C-0194-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPID` containers." + "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json index b35c89718..c3b172c8f 100644 --- a/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json +++ b/controls/C-0195-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers." + "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json index 5428a8b9d..e725cc6ca 100644 --- a/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json +++ b/controls/C-0196-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers." + "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json index 2007bce78..0d1387315 100644 --- a/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json +++ b/controls/C-0197-minimizetheadmissionofcontainerswithallowprivilegeescalation.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container." 
+ "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0198-minimizetheadmissionofrootcontainers.json b/controls/C-0198-minimizetheadmissionofrootcontainers.json index 6f7e958f7..c5868cd23 100644 --- a/controls/C-0198-minimizetheadmissionofrootcontainers.json +++ b/controls/C-0198-minimizetheadmissionofrootcontainers.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root." + "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json index c0103bb87..ca4f202e7 100644 --- a/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json +++ b/controls/C-0199-minimizetheadmissionofcontainerswiththenet_rawcapability.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability." + "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json index 0b761a97a..b13f3c92a 100644 --- a/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json +++ b/controls/C-0200-minimizetheadmissionofcontainerswithaddedcapabilities.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, there are no restrictions on adding capabilities to containers." 
+ "default_value": "By default, there are no restrictions on adding capabilities to containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json index b237ff336..397e4721e 100644 --- a/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json +++ b/controls/C-0201-minimizetheadmissionofcontainerswithcapabilitiesassigned.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities" + "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json index 582c3d493..ab7562f7f 100644 --- a/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json +++ b/controls/C-0202-minimizetheadmissionofwindowshostprocesscontainers.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers." + "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json index e51df920e..a38ba19e6 100644 --- a/controls/C-0203-minimizetheadmissionofhostpathvolumes.json +++ b/controls/C-0203-minimizetheadmissionofhostpathvolumes.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a spefific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes." + "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json index 2c40e37a1..fe9618d62 100644 --- a/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json +++ b/controls/C-0204-minimizetheadmissionofcontainerswhichusehostports.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the use of HostPorts." 
+ "default_value": "By default, there are no restrictions on the use of HostPorts.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json b/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json index d1741718a..cb279da9e 100644 --- a/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json +++ b/controls/C-0205-ensurethatthecniinusesupportsnetworkpolicies.json @@ -16,5 +16,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "This will depend on the CNI plugin in use." + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json index c5656d2ac..6b3c0a6d3 100644 --- a/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json +++ b/controls/C-0206-ensurethatallnamespaceshavenetworkpoliciesdefined.json @@ -17,5 +17,10 @@ ], "baseScore": 4, "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", - "default_value": "By default, network policies are not created." + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json index eebf58006..633e7245f 100644 --- a/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json +++ b/controls/C-0207-preferusingsecretsasfilesoversecretsasenvironmentvariables.json @@ -17,5 +17,10 @@ ], "baseScore": 4, "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined" + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0208-considerexternalsecretstorage.json b/controls/C-0208-considerexternalsecretstorage.json index f9058c757..6add1830c 100644 --- a/controls/C-0208-considerexternalsecretstorage.json +++ b/controls/C-0208-considerexternalsecretstorage.json @@ -16,5 +16,10 @@ "armoBuiltin": true }, "rulesNames": ["external-secret-storage"], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json b/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json index cada4c4a2..775cc42fa 100644 --- a/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json +++ b/controls/C-0209-createadministrativeboundariesbetweenresourcesusingnamespaces.json @@ -17,5 +17,10 @@ ], "baseScore": 5, "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. 
`kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-node-lease` - Namespace used for node heartbeats\n4. `kube-public` - Namespace used for public information in a cluster" + "default_value": "By default, Kubernetes starts with four initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-node-lease` - Namespace used for node heartbeats\n4. `kube-public` - Namespace used for public information in a cluster", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json index bcdaf0e8b..6a0d96986 100644 --- a/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json +++ b/controls/C-0210-ensurethattheseccompprofileissettodockerdefaultinyourpoddefinitions.json @@ -17,5 +17,10 @@ ], "baseScore": 4, "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", - "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled." + "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json index c092391d0..a455b8b6e 100644 --- a/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json +++ b/controls/C-0211-applysecuritycontexttoyourpodsandcontainers.json @@ -19,13 +19,18 @@ "drop-capability-netraw", "set-seLinuxOptions", "set-seccomp-profile", - "set-procmount-default", - "set-fsgroup-value", - "set-fsgroupchangepolicy-value", - "set-systctls-params", - "set-supplementalgroups-values" + "set-procmount-default", + "set-fsgroup-value", + "set-fsgroupchangepolicy-value", + "set-systctls-params", + "set-supplementalgroups-values" ], "baseScore": 8, "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods." 
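For C-0205 and C-0206 above, the usual starting point is a per-namespace default-deny policy; a sketch (the namespace name is assumed) is:

```json
{
  "apiVersion": "networking.k8s.io/v1",
  "kind": "NetworkPolicy",
  "metadata": { "name": "default-deny-all", "namespace": "payments" },
  "spec": {
    "podSelector": {},
    "policyTypes": ["Ingress", "Egress"]
  }
}
```

The empty `podSelector` matches every pod in the namespace, so all ingress and egress is denied until more specific allow policies are added.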
-} + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + } +} \ No newline at end of file diff --git a/controls/C-0212-thedefaultnamespaceshouldnotbeused.json b/controls/C-0212-thedefaultnamespaceshouldnotbeused.json index fe4774531..e6e976b35 100644 --- a/controls/C-0212-thedefaultnamespaceshouldnotbeused.json +++ b/controls/C-0212-thedefaultnamespaceshouldnotbeused.json @@ -24,5 +24,10 @@ ], "baseScore": 4, "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used" + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json b/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json index 7da58723a..73ecf84f5 100644 --- a/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json +++ b/controls/C-0213-minimizetheadmissionofprivilegedcontainers.json @@ -17,5 +17,10 @@ ], "baseScore": 8.0, "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```" + "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. 
The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json b/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json index d61f1dd7f..ce11828ec 100644 --- a/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json +++ b/controls/C-0214-minimizetheadmissionofcontainerswishingtosharethehostprocessidnamespace.json @@ -16,5 +16,11 @@ ], "baseScore": 5.0, "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined." + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json b/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json index 82508dd7c..d4e00efb6 100644 --- a/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json +++ b/controls/C-0215-minimizetheadmissionofcontainerswishingtosharethehostipcnamespace.json @@ -16,5 +16,11 @@ ], "baseScore": 5.0, "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined." + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json b/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json index d3e40fe00..05a610d69 100644 --- a/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json +++ b/controls/C-0216-minimizetheadmissionofcontainerswishingtosharethehostnetworknamespace.json @@ -16,5 +16,11 @@ ], "baseScore": 5.0, "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined." 
+ "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json b/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json index 3f8916bd9..ebb9620ad 100644 --- a/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json +++ b/controls/C-0217-minimizetheadmissionofcontainerswithallowprivilegeescalation.json @@ -16,5 +16,11 @@ ], "baseScore": 6.0, "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined." + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0218-minimizetheadmissionofrootcontainers.json b/controls/C-0218-minimizetheadmissionofrootcontainers.json index 96e1ca2d1..09bdb7782 100644 --- a/controls/C-0218-minimizetheadmissionofrootcontainers.json +++ b/controls/C-0218-minimizetheadmissionofrootcontainers.json @@ -16,5 +16,11 @@ ], "baseScore": 6.0, "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined." + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json b/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json index 8b54df3b5..dd3beb276 100644 --- a/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json +++ b/controls/C-0219-minimizetheadmissionofcontainerswithaddedcapabilities.json @@ -17,5 +17,11 @@ ], "baseScore": 5.0, "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created 'allowedCapabilities' is set by default." + "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created 'allowedCapabilities' is set by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json b/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json index 3a31d41c6..adbedb4fa 100644 --- a/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json +++ b/controls/C-0220-minimizetheadmissionofcontainerswithcapabilitiesassigned.json @@ -17,5 +17,11 @@ ], "baseScore": 5.0, "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined." 
+ "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json b/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json index 1ef552dfa..965ca6348 100644 --- a/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json +++ b/controls/C-0221-ensureimagevulnerabilityscanningusingamazonecrimagescanningorathirdpartyprovider.json @@ -15,6 +15,12 @@ "ensure-image-scanning-enabled-cloud" ], "baseScore": 5, - "impact_statement": "If you are utilizing AWS ECR\n\n The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API.\n\n UnsupportedImageError\nYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image.\n\n An UNDEFINED severity level is returned\nYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this:\n\n The vulnerability was not assigned a priority by the CVE source.\n\n The vulnerability was assigned a priority that Amazon ECR did not recognize.\n\n To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", - "default_value": "Images are not scanned by Default." + "impact_statement": "If you are utilizing AWS ECR The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageErrorYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returnedYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. 
To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0222-minimizeuseraccesstoamazonecr.json b/controls/C-0222-minimizeuseraccesstoamazonecr.json index f14252d4e..9b07514fc 100644 --- a/controls/C-0222-minimizeuseraccesstoamazonecr.json +++ b/controls/C-0222-minimizeuseraccesstoamazonecr.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", - "default_value": "" -} + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json b/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json index 141a5f9f5..48038f40d 100644 --- a/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json +++ b/controls/C-0223-minimizeclusteraccesstoreadonlyforamazonecr.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default." -} + "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", + "scanningScope": { + "matches": [ + "cloud" + ] + } +} \ No newline at end of file diff --git a/controls/C-0225-preferusingdedicatedeksserviceaccounts.json b/controls/C-0225-preferusingdedicatedeksserviceaccounts.json index 80d4e8f82..937fe41f0 100644 --- a/controls/C-0225-preferusingdedicatedeksserviceaccounts.json +++ b/controls/C-0225-preferusingdedicatedeksserviceaccounts.json @@ -19,5 +19,10 @@ ], "baseScore": 7, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json b/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json index 06303bf51..7090d5651 100644 --- a/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json +++ b/controls/C-0226-preferusingacontaineroptimizedoswhenpossible.json @@ -17,5 +17,10 @@ ], "baseScore": 3, "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. ssh) may not be possible, with access and debugging being intended via a management tool.", - "default_value": "A container-optimized OS is not the default." 
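To make C-0223 above concrete, read-only pull access to Amazon ECR for worker nodes comes down to an IAM policy along these lines; this is a sketch of the commonly used read-only action set, not an exact copy of any AWS managed policy:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetAuthorizationToken",
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
        "ecr:BatchGetImage"
      ],
      "Resource": "*"
    }
  ]
}
```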
+ "default_value": "A container-optimized OS is not the default.", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json b/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json index 8b9159e1d..d3f811ee0 100644 --- a/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json +++ b/controls/C-0227-restrictaccesstothecontrolplaneendpoint.json @@ -14,5 +14,10 @@ "rulesNames": ["ensure-endpointprivateaccess-is-enabled"], "baseScore": 8.0, "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", - "default_value": "By default, Endpoint Public Access is disabled." + "default_value": "By default, Endpoint Public Access is disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + } } \ No newline at end of file diff --git a/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json b/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json index b11c9be1b..a761077d7 100644 --- a/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json +++ b/controls/C-0228-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json @@ -14,5 +14,10 @@ "rulesNames": ["ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks"], "baseScore": 8.0, "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", - "default_value": "By default, the Public Endpoint is disabled." + "default_value": "By default, the Public Endpoint is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json b/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json index 75d8e9547..2385ad0a2 100644 --- a/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json +++ b/controls/C-0229-ensureclustersarecreatedwithprivatenodes.json @@ -12,5 +12,10 @@ "rulesNames": ["ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks"], "baseScore": 8.0, "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json b/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json index 4147fb51c..14d578d16 100644 --- a/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json +++ b/controls/C-0230-ensurenetworkpolicyisenabledandsetasappropriate.json @@ -12,5 +12,10 @@ "rulesNames": ["ensure-network-policy-is-enabled-eks"], "baseScore": 6.0, "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled." + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json b/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json index c6f4676dc..d38d84530 100644 --- a/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json +++ b/controls/C-0231-encrypttraffictohttpsloadbalancerswithtlscertificates.json @@ -14,5 +14,10 @@ "rulesNames": ["ensure-https-loadbalancers-encrypted-with-tls-aws"], "baseScore": 5.0, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json b/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json index 96b7545e5..d09998ff6 100644 --- a/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json +++ b/controls/C-0232-managekubernetesrbacuserswithawsiamauthenticatorforkubernetesorupgradetoawscliv116156.json @@ -17,5 +17,10 @@ ], "baseScore": 7, "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not they will not be able to access the namespace or deploy.", - "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane" + "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0233-considerfargateforrunninguntrustedworkloads.json b/controls/C-0233-considerfargateforrunninguntrustedworkloads.json index 0e42b275a..61e0d1910 100644 --- a/controls/C-0233-considerfargateforrunninguntrustedworkloads.json +++ b/controls/C-0233-considerfargateforrunninguntrustedworkloads.json @@ -14,7 +14,12 @@ "rulesNames": [ "alert-fargate-not-in-use" ], - "baseScore": 3.0, + "baseScore": 3, "impact_statement": "", - "default_value": "By default, AWS Fargate is not utilized." + "default_value": "By default, AWS Fargate is not utilized.", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0234-considerexternalsecretstorage.json b/controls/C-0234-considerexternalsecretstorage.json index feb333066..93551b69a 100644 --- a/controls/C-0234-considerexternalsecretstorage.json +++ b/controls/C-0234-considerexternalsecretstorage.json @@ -12,5 +12,10 @@ "rulesNames": ["ensure-external-secrets-storage-is-in-use"], "baseScore": 6.0, "impact_statement": "None", - "default_value": "By default, no external secret management is configured." + "default_value": "By default, no external secret management is configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json b/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json index 3a5c66ff9..c651e3ff7 100644 --- a/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json +++ b/controls/C-0235-ensurethatthekubeletconfigurationfilehaspermissionssetto644ormorerestrictive.json @@ -16,5 +16,10 @@ ], "baseScore": 6.0, "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value." 
+ "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0236-verifyimagesignature.json b/controls/C-0236-verifyimagesignature.json index 3502fe442..539227d6f 100644 --- a/controls/C-0236-verifyimagesignature.json +++ b/controls/C-0236-verifyimagesignature.json @@ -16,5 +16,11 @@ ], "baseScore": 7, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0237-hasimagesignature.json b/controls/C-0237-hasimagesignature.json index 2830dc6c7..41d75ca92 100644 --- a/controls/C-0237-hasimagesignature.json +++ b/controls/C-0237-hasimagesignature.json @@ -15,5 +15,11 @@ ], "baseScore": 7, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json b/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json index 1e07f0432..9852350e2 100644 --- a/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json +++ b/controls/C-0238-ensurethatthekubeconfigfilepermissionsaresetto644ormorerestrictive.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value." + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json index e7c844619..a2159db1f 100644 --- a/controls/C-0239-preferusingdedicatedaksserviceaccounts.json +++ b/controls/C-0239-preferusingdedicatedaksserviceaccounts.json @@ -16,5 +16,10 @@ ], "baseScore": 7, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json index 0895e1844..5faee94ea 100644 --- a/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json +++ b/controls/C-0240-ensurenetworkpolicyisenabledandsetasappropriate.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled." -} + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0241-useazurerbacforkubernetesauthorization.json b/controls/C-0241-useazurerbacforkubernetesauthorization.json index edf80d5c9..c49ccdac6 100644 --- a/controls/C-0241-useazurerbacforkubernetesauthorization.json +++ b/controls/C-0241-useazurerbacforkubernetesauthorization.json @@ -14,5 +14,10 @@ "rulesNames": ["ensure-azure-rbac-is-set"], "baseScore": 7, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0242-hostilemultitenantworkloads.json b/controls/C-0242-hostilemultitenantworkloads.json index 7ace70153..62e98a3bb 100644 --- a/controls/C-0242-hostilemultitenantworkloads.json +++ b/controls/C-0242-hostilemultitenantworkloads.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "", - "default_value": "" -} + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json index 4f4c19986..daa7c6d2e 100644 --- a/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json +++ b/controls/C-0243-ensureimagevulnerabilityscanningusingazuredefenderimagescanningorathirdpartyprovider.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.", - "default_value": "Images are not scanned by Default." 
-} + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0244-ensurekubernetessecretsareencrypted.json b/controls/C-0244-ensurekubernetessecretsareencrypted.json index 4aa396696..3bb263573 100644 --- a/controls/C-0244-ensurekubernetessecretsareencrypted.json +++ b/controls/C-0244-ensurekubernetessecretsareencrypted.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "", - "default_value": "" -} + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json index f148cf7f1..d2c800b01 100644 --- a/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json +++ b/controls/C-0245-encrypttraffictohttpsloadbalancerswithtlscertificates.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0246-avoiduseofsystemmastersgroup.json b/controls/C-0246-avoiduseofsystemmastersgroup.json index 1b283d603..da7412a5a 100644 --- a/controls/C-0246-avoiduseofsystemmastersgroup.json +++ b/controls/C-0246-avoiduseofsystemmastersgroup.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", - "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations." -} + "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", + "scanningScope": { + "matches": [ + "cluster" + ] + } +} \ No newline at end of file diff --git a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json index 1db6b7c0e..b9d730671 100644 --- a/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json +++ b/controls/C-0247-restrictaccesstothecontrolplaneendpoint.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", - "default_value": "By default, Endpoint Private Access is disabled." 
-} + "default_value": "By default, Endpoint Private Access is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json index 3da11d346..aca15749d 100644 --- a/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json +++ b/controls/C-0248-ensureclustersarecreatedwithprivatenodes.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "" -} + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0249-restrictuntrustedworkloads.json b/controls/C-0249-restrictuntrustedworkloads.json index 7cf717853..e2c5d3123 100644 --- a/controls/C-0249-restrictuntrustedworkloads.json +++ b/controls/C-0249-restrictuntrustedworkloads.json @@ -10,12 +10,17 @@ ], "attributes": { "armoBuiltin": true, - "actionRequired": "manual review" + "actionRequired": "manual review" }, "rulesNames": [ "rule-manual" ], "baseScore": 5, "impact_statement": "", - "default_value": "ACI is not a default component of the AKS" -} + "default_value": "ACI is not a default component of the AKS", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json index f62699f27..674e4c2b6 100644 --- a/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json +++ b/controls/C-0250-minimizeclusteraccesstoreadonlyforazurecontainerregistryacr.json @@ -1,20 +1,25 @@ { - "controlID": "C-0250", - "name": "Minimize cluster access to read-only for Azure Container Registry (ACR)", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", - "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-service-principle-has-read-only-permissions" - ], - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "" -} \ No newline at end of file + "controlID": "C-0250", + "name": "Minimize cluster access to read-only for Azure Container Registry (ACR)", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. 
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-service-principle-has-read-only-permissions" + ], + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} diff --git a/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json b/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json index 2515ea5ca..a8d5af221 100644 --- a/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json +++ b/controls/C-0251-minimizeuseraccesstoazurecontainerregistryacr.json @@ -16,5 +16,10 @@ ], "baseScore": 6, "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "cluster" + ] + } } \ No newline at end of file diff --git a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json index 10d39683d..15d9a5c14 100644 --- a/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json +++ b/controls/C-0252-ensureclustersarecreatedwithprivateendpointenabledandpublicaccessdisabled.json @@ -16,5 +16,10 @@ ], "baseScore": 8, "impact_statement": "", - "default_value": "" + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + } } \ No newline at end of file diff --git a/controls/C-0253-deprecated-k8s-registry.json b/controls/C-0253-deprecated-k8s-registry.json index 2e1f68036..35fa98c76 100644 --- a/controls/C-0253-deprecated-k8s-registry.json +++ b/controls/C-0253-deprecated-k8s-registry.json @@ -12,5 +12,11 @@ "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", "controlID": "C-0253", "baseScore": 5.0, - "example": "@controls/examples/c239.yaml" + "example": "@controls/examples/c239.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0254-enableauditlogs.json b/controls/C-0254-enableauditlogs.json index 47e9f2346..eb9ef642f 100644 --- a/controls/C-0254-enableauditlogs.json +++ b/controls/C-0254-enableauditlogs.json @@ -16,5 +16,10 @@ ], "baseScore": 5, "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. 
All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", - "default_value": "By default, cluster control plane logs aren't sent to be Logged." -} + "default_value": "By default, cluster control plane logs aren't sent to be Logged.", + "scanningScope": { + "matches": [ + "AKS" + ] + } +} \ No newline at end of file diff --git a/controls/C-0255-workloadwithsecretaccess.json b/controls/C-0255-workloadwithsecretaccess.json index 54098517e..686a72577 100644 --- a/controls/C-0255-workloadwithsecretaccess.json +++ b/controls/C-0255-workloadwithsecretaccess.json @@ -19,5 +19,11 @@ "rulesNames": ["workload-mounted-secrets"], "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", "controlID": "C-0255", - "baseScore": 8.0 + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0256-exposuretointernet.json b/controls/C-0256-exposuretointernet.json index 35ebe5a93..fa652ee33 100644 --- a/controls/C-0256-exposuretointernet.json +++ b/controls/C-0256-exposuretointernet.json @@ -25,5 +25,11 @@ "rulesNames": ["exposure-to-internet"], "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", "controlID": "C-0256", - "baseScore": 7.0 + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } diff --git a/controls/C-0257-pvcaccess.json b/controls/C-0257-pvcaccess.json index d2ee83a1b..05a43cd02 100644 --- a/controls/C-0257-pvcaccess.json +++ b/controls/C-0257-pvcaccess.json @@ -19,5 +19,11 @@ "rulesNames": ["workload-mounted-pvc"], "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", "controlID": "C-0257", - "baseScore": 4.0 + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0258-configmapaccess.json b/controls/C-0258-configmapaccess.json index cf0e62870..b0a52d5b0 100644 --- a/controls/C-0258-configmapaccess.json +++ b/controls/C-0258-configmapaccess.json @@ -19,5 +19,11 @@ "rulesNames": ["workload-mounted-configmap"], "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", "controlID": "C-0258", - "baseScore": 5.0 + 
"baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0259-workloadwithcredentialaccess.json b/controls/C-0259-workloadwithcredentialaccess.json index ef60a80fe..9153afd41 100644 --- a/controls/C-0259-workloadwithcredentialaccess.json +++ b/controls/C-0259-workloadwithcredentialaccess.json @@ -19,5 +19,11 @@ "rulesNames": ["rule-credentials-in-env-var"], "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", "controlID": "C-0259", - "baseScore": 8.0 + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0260-missingnetworkpolicy.json b/controls/C-0260-missingnetworkpolicy.json index a2efffdbd..55e82f30d 100644 --- a/controls/C-0260-missingnetworkpolicy.json +++ b/controls/C-0260-missingnetworkpolicy.json @@ -19,5 +19,11 @@ "rulesNames": ["ensure_network_policy_configured_in_labels"], "test": "Check that all workloads has a network policy configured in labels.", "controlID": "C-0260", - "baseScore": 5.0 + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } \ No newline at end of file diff --git a/controls/C-0261-satokenmounted.json b/controls/C-0261-satokenmounted.json index f2216f5e6..5e438e4af 100644 --- a/controls/C-0261-satokenmounted.json +++ b/controls/C-0261-satokenmounted.json @@ -19,5 +19,11 @@ "rulesNames": ["serviceaccount-token-mount"], "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", "controlID": "C-0261", - "baseScore": 7.0 + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } diff --git a/controls/C-0262-anonymousaccessisenabled.json b/controls/C-0262-anonymousaccessisenabled.json index 4f6ed8aa0..4e99b429c 100644 --- a/controls/C-0262-anonymousaccessisenabled.json +++ b/controls/C-0262-anonymousaccessisenabled.json @@ -11,5 +11,11 @@ "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", "anonymous-access-enabled" ], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } diff --git a/go.work.sum b/go.work.sum index 1bb71c85d..ec584e775 100644 --- a/go.work.sum +++ b/go.work.sum @@ -4,19 +4,16 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod 
h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -44,9 +41,7 @@ github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:m github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/assert/v2 v2.2.0/go.mod h1:b/+1DI2Q6NckYi+3mXyH3wFb8qG37K/DuK80n7WefXA= @@ -315,7 +310,6 @@ github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDB github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= @@ -447,7 +441,6 @@ github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod 
h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= @@ -634,12 +627,9 @@ github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -758,7 +748,6 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -871,6 +860,7 @@ golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -908,6 +898,7 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api 
v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= @@ -938,6 +929,7 @@ k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= @@ -948,8 +940,10 @@ k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2R k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= diff --git a/scripts/validations.py b/scripts/validations.py index c660409eb..065acf370 100644 --- a/scripts/validations.py +++ b/scripts/validations.py @@ -26,6 +26,23 @@ def validate_controls_in_framework(): # validate control exists and name is according to convention assert control_id in CONTROLID_TO_FILENAME, f"No file found for Control ID {control_id}." 
+def validate_control_scanning_scope(control): + allowed_scopes = [["cluster", "file"], ["cluster"], ["cloud"], ["GKE"], ["EKS"], ["AKS"]] + controlID = control["controlID"] + + scanning_scopes = control.get("scanningScope") + assert scanning_scopes is not None, f"control {controlID} has no [\"scanningScope\"] field" + + scanning_scopes_match = scanning_scopes.get("matches") + assert scanning_scopes_match is not None, f"control {controlID} has no [\"scanningScope\"][\"matches\"] field" + + scope_allowed_check = False + for allowed_scope in allowed_scopes: + if scanning_scopes_match == allowed_scope: + scope_allowed_check = True + break + assert scope_allowed_check, f"control {controlID} scanningScope is not one of the allowed scopes" + # Test that each rule name in a control file has a corresponding rule file in the "rules" directory def validate_controls(): @@ -52,6 +69,7 @@ def validate_controls(): if not os.path.exists(os.path.join(RULES_DIR, rule_dir + "-v1")): validate_tests_dir_for_rule(rule_dir) RULES_CHECKED.add(rule_name) + validate_control_scanning_scope(control=control) # Test that each rule directory in the "rules" directory has a non-empty "tests" subdirectory @@ -69,6 +87,7 @@ def fill_controlID_to_filename_map(): # Load the JSON files if filename.endswith('.json'): with open(os.path.join(CONTROLS_DIR, filename)) as f1: + print(filename) cntl = json.load(f1) CONTROLID_TO_FILENAME[cntl['controlID']] = filename From 576fe5b4cd28d9991be8d1fea8c2abcc8445a020 Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Thu, 20 Jul 2023 12:51:08 +0300 Subject: [PATCH 2/8] just for system test - check new scope Signed-off-by: rcohencyberarmor --- gitregostore/datastructures.go | 2 +- releaseDev/ControlID_RuleName.csv | 276 + releaseDev/FWName_CID_CName.csv | 408 + releaseDev/allcontrols.json | 5496 ++++++++++++ releaseDev/armobest.json | 3358 +++++++ releaseDev/attack_tracks.json | 59 + releaseDev/cis-aks-t1.2.0.json | 4072 +++++++++ releaseDev/cis-eks-t1.2.0.json | 4299 +++++++++ releaseDev/cis-v1.23-t1.0.1.json | 7883 ++++++++++++++++ releaseDev/controls.json | 6728 ++++++++++++++ releaseDev/default_config_inputs.json | 140 + releaseDev/devopsbest.json | 987 ++ releaseDev/exceptions.json | 6854 ++++++++++++++ releaseDev/frameworks.json | 11419 ++++++++++++++++++++++++ releaseDev/mitre.json | 2616 ++++++ releaseDev/nsa.json | 2145 +++++ releaseDev/rules.json | 8953 +++++++++++++++++++ releaseDev/security.json | 1949 ++++ releaseDev/security_frameworks.json | 520 ++ 19 files changed, 68163 insertions(+), 1 deletion(-) create mode 100644 releaseDev/ControlID_RuleName.csv create mode 100644 releaseDev/FWName_CID_CName.csv create mode 100644 releaseDev/allcontrols.json create mode 100644 releaseDev/armobest.json create mode 100644 releaseDev/attack_tracks.json create mode 100644 releaseDev/cis-aks-t1.2.0.json create mode 100644 releaseDev/cis-eks-t1.2.0.json create mode 100644 releaseDev/cis-v1.23-t1.0.1.json create mode 100644 releaseDev/controls.json create mode 100644 releaseDev/default_config_inputs.json create mode 100644 releaseDev/devopsbest.json create mode 100644 releaseDev/exceptions.json create mode 100644 releaseDev/frameworks.json create mode 100644 releaseDev/mitre.json create mode 100644 releaseDev/nsa.json create mode 100644 releaseDev/rules.json create mode 100644 releaseDev/security.json create mode 100644 releaseDev/security_frameworks.json diff --git a/gitregostore/datastructures.go b/gitregostore/datastructures.go index 7cd65964c..685d4b9da 100644 --- a/gitregostore/datastructures.go +++
b/gitregostore/datastructures.go @@ -99,7 +99,7 @@ func (gs *GitRegoStore) SetRegoObjects() error { // NewDefaultGitRegoStore - generates git store object for production regolibrary release files. // Release files source: "https://github.com/kubescape/regolibrary/releases/latest/download" func NewDefaultGitRegoStore(frequency int) *GitRegoStore { - gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releases", "latest/download", "", frequency) + gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releaseDev", "/", "scanning-scope-support", frequency) return gs } diff --git a/releaseDev/ControlID_RuleName.csv b/releaseDev/ControlID_RuleName.csv new file mode 100644 index 000000000..0ba1687a1 --- /dev/null +++ b/releaseDev/ControlID_RuleName.csv @@ -0,0 +1,276 @@ +ControlID,RuleName +C-0016,rule-allow-privilege-escalation +C-0174,enforce-kubelet-client-tls-authentication-updated +C-0100,ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive +C-0140,ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate +C-0015,rule-can-list-get-secrets +C-0015,rule-can-list-get-secrets-v1 +C-0164,if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive +C-0181,validate-kubelet-tls-configuration-updated +C-0192,pod-security-admission-applied +C-0017,immutable-container-filesystem +C-0144,ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate +C-0054,internal-networking +C-0170,if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive +C-0143,ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers +C-0199,pod-security-admission-baseline-applied +C-0200,pod-security-admission-restricted-applied +C-0152,ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1 +C-0014,rule-access-dashboard +C-0014,rule-access-dashboard-subject-v1 +C-0014,rule-access-dashboard-wl-v1 +C-0142,ensure-that-the-api-server-encryption-providers-are-appropriately-configured +C-0205,ensure-that-the-cni-in-use-supports-network-policies +C-0188,rule-can-create-pod +C-0036,list-all-validating-webhooks +C-0115,ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set +C-0065,rule-can-impersonate-users-groups +C-0065,rule-can-impersonate-users-groups-v1 +C-0133,ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate +C-0105,ensure-that-the-admin.conf-file-ownership-is-set-to-root-root +C-0249,rule-manual +C-0163,ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root +C-0034,automount-service-account +C-0254,rule-manual +C-0203,pod-security-admission-baseline-applied +C-0086,CVE-2022-0492 +C-0229,ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks +C-0083,exposed-critical-pods +C-0114,ensure-that-the-api-server-token-auth-file-parameter-is-not-set +C-0189,automount-default-service-account +C-0189,namespace-without-service-account +C-0215,psp-deny-hostipc +C-0176,kubelet-streaming-connection-idle-timeout +C-0099,ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root +C-0241,ensure-azure-rbac-is-set +C-0212,pods-in-default-namespace +C-0212,resources-rbac-in-default-namespace +C-0212,resources-core1-in-default-namespace +C-0212,resources-core2-in-default-namespace +C-0212,resources-other1-in-default-namespace +C-0212,resources-other2-in-default-namespace +C-0212,resources-secret-in-default-namespace 
+C-0212,resources-event-in-default-namespace +C-0138,ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate +C-0235,ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive +C-0001,rule-identify-blocklisted-image-registries +C-0078,container-image-repository +C-0102,ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive +C-0058,Symlink-Exchange-Can-Allow-Host-Filesystem-Access +C-0222,ensure-aws-policies-are-present +C-0031,rule-can-delete-k8s-events +C-0031,rule-can-delete-k8s-events-v1 +C-0147,ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate +C-0063,rule-can-portforward +C-0063,rule-can-portforward-v1 +C-0256,exposure-to-internet +C-0247,restrict-access-to-the-control-plane-endpoint +C-0248,ensure-clusters-are-created-with-private-nodes +C-0246,rule-manual +C-0239,ensure-default-service-accounts-has-only-default-roles +C-0153,etcd-tls-enabled +C-0013,non-root-containers +C-0213,psp-deny-privileged-container +C-0055,linux-hardening +C-0132,ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate +C-0244,secret-etcd-encryption-cloud +C-0190,automount-service-account +C-0127,ensure-that-the-admission-control-plugin-NodeRestriction-is-set +C-0183,kubelet-rotate-kubelet-server-certificate +C-0117,ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate +C-0141,ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate +C-0198,pod-security-admission-restricted-applied +C-0093,ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root +C-0169,ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root +C-0123,ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set +C-0116,ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate +C-0193,pod-security-admission-baseline-applied +C-0077,K8s common labels usage +C-0148,ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate +C-0204,pod-security-admission-baseline-applied +C-0038,host-pid-ipc-privileges +C-0245,encrypt-traffic-to-https-load-balancers-with-tls-certificates +C-0261,serviceaccount-token-mount +C-0113,ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false +C-0258,workload-mounted-configmap +C-0045,alert-rw-hostpath +C-0118,ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow +C-0233,alert-fargate-not-in-use +C-0020,alert-mount-potential-credentials-paths +C-0214,psp-deny-hostpid +C-0243,ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider +C-0250,ensure-service-principle-has-read-only-permissions +C-0186,rule-can-list-get-secrets-v1 +C-0135,ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true +C-0173,kubelet-authorization-mode-alwaysAllow +C-0049,internal-networking +C-0225,ensure-default-service-accounts-has-only-default-roles +C-0225,automount-default-service-account +C-0002,exec-into-container +C-0002,exec-into-container-v1 +C-0037,rule-can-update-configmap +C-0037,rule-can-update-configmap-v1 +C-0062,sudo-in-container-entrypoint +C-0134,ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate +C-0151,ensure-that-the-scheduler-profiling-argument-is-set-to-false +C-0157,etcd-peer-client-auth-cert 
+C-0101,ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root +C-0042,rule-can-ssh-to-pod +C-0042,rule-can-ssh-to-pod-v1 +C-0059,nginx-ingress-snippet-annotation-vulnerability +C-0171,ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root +C-0124,ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used +C-0073,naked-pods +C-0150,ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1 +C-0091,CVE-2022-47633 +C-0194,pod-security-admission-baseline-applied +C-0053,access-container-service-account +C-0053,access-container-service-account-v1 +C-0052,instance-metadata-api-access +C-0103,ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd +C-0210,set-seccomp-profile-RuntimeDefault +C-0090,CVE-2022-39328 +C-0050,resources-cpu-limit-and-request +C-0079,CVE-2022-0185 +C-0187,rule-list-all-cluster-admins-v1 +C-0104,ensure-that-the-admin.conf-file-permissions-are-set-to-600 +C-0251,list-role-definitions-in-acr +C-0085,excessive_amount_of_vulnerabilities_pods +C-0226,alert-container-optimized-os-not-in-use +C-0009,resource-policies +C-0012,rule-credentials-in-env-var +C-0012,rule-credentials-configmap +C-0221,ensure-image-scanning-enabled-cloud +C-0180,kubelet-event-qps +C-0129,ensure-that-the-api-server-profiling-argument-is-set-to-false +C-0161,audit-policy-content +C-0044,container-hostPort +C-0195,pod-security-admission-baseline-applied +C-0108,ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive +C-0165,if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root +C-0066,secret-etcd-encryption-cloud +C-0066,etcd-encryption-native +C-0139,ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate +C-0197,pod-security-admission-restricted-applied +C-0005,insecure-port-flag +C-0120,ensure-that-the-api-server-authorization-mode-argument-includes-RBAC +C-0237,has-image-signature +C-0068,psp-enabled-cloud +C-0068,psp-enabled-native +C-0236,verify-image-signature +C-0216,psp-deny-hostnetwork +C-0084,exposed-rce-pods +C-0158,etcd-peer-auto-tls-disabled +C-0018,configured-readiness-probe +C-0172,anonymous-requests-to-kubelet-service-updated +C-0121,ensure-that-the-admission-control-plugin-EventRateLimit-is-set +C-0154,etcd-client-auth-cert +C-0094,ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0128,ensure-that-the-api-server-secure-port-argument-is-not-set-to-0 +C-0131,ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate +C-0260,ensure_network_policy_configured_in_labels +C-0218,psp-deny-root-container +C-0231,ensure-https-loadbalancers-encrypted-with-tls-aws +C-0110,ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root +C-0074,containers-mounting-docker-socket +C-0168,ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive +C-0146,ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true +C-0252,ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled +C-0069,anonymous-requests-to-kubelet-service-updated +C-0262,ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false +C-0262,anonymous-access-enabled +C-0035,rule-list-all-cluster-admins +C-0035,rule-list-all-cluster-admins-v1 +C-0208,external-secret-storage +C-0021,exposed-sensitive-interfaces +C-0021,exposed-sensitive-interfaces-v1 
+C-0075,image-pull-policy-is-not-set-to-always +C-0217,psp-deny-allowprivilegeescalation +C-0149,ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true +C-0125,ensure-that-the-admission-control-plugin-ServiceAccount-is-set +C-0191,rule-can-bind-escalate +C-0191,rule-can-impersonate-users-groups-v1 +C-0202,pod-security-admission-baseline-applied +C-0185,cluster-admin-role +C-0162,ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive +C-0206,internal-networking +C-0076,label-usage-for-resources +C-0111,ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive +C-0177,kubelet-protect-kernel-defaults +C-0207,rule-secrets-in-env-var +C-0004,resources-memory-limit-and-request +C-0089,CVE-2022-3172 +C-0255,workload-mounted-secrets +C-0046,insecure-capabilities +C-0242,rule-hostile-multitenant-workloads +C-0259,rule-credentials-in-env-var +C-0112,ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600 +C-0230,ensure-network-policy-is-enabled-eks +C-0240,rule-cni-enabled-aks +C-0136,ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate +C-0081,CVE-2022-24348 +C-0137,ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate +C-0160,k8s-audit-logs-enabled-native-cis +C-0096,ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0145,ensure-that-the-controller-manager-profiling-argument-is-set-to-false +C-0156,etcd-peer-tls-enabled +C-0119,ensure-that-the-api-server-authorization-mode-argument-includes-Node +C-0182,kubelet-rotate-certificates +C-0253,rule-identify-old-k8s-registry +C-0178,kubelet-ip-tables +C-0227,ensure-endpointprivateaccess-is-enabled +C-0030,ingress-and-egress-blocked +C-0097,ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root +C-0007,rule-excessive-delete-rights +C-0007,rule-excessive-delete-rights-v1 +C-0061,pods-in-default-namespace +C-0041,host-network-access +C-0159,etcd-unique-ca +C-0130,ensure-that-the-api-server-audit-log-path-argument-is-set +C-0048,alert-any-hostpath +C-0057,rule-privilege-escalation +C-0211,rule-privilege-escalation +C-0211,immutable-container-filesystem +C-0211,non-root-containers +C-0211,drop-capability-netraw +C-0211,set-seLinuxOptions +C-0211,set-seccomp-profile +C-0211,set-procmount-default +C-0211,set-fsgroup-value +C-0211,set-fsgroupchangepolicy-value +C-0211,set-systctls-params +C-0211,set-supplementalgroups-values +C-0228,ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks +C-0220,psp-required-drop-capabilities +C-0167,ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root +C-0056,configured-liveness-probe +C-0209,list-all-namespaces +C-0095,ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root +C-0122,ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set +C-0107,ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root +C-0098,ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0196,pod-security-admission-baseline-applied +C-0087,CVE-2022-23648 +C-0175,read-only-port-enabled-updated +C-0070,enforce-kubelet-client-tls-authentication-updated +C-0184,kubelet-strong-cryptographics-ciphers +C-0067,k8s-audit-logs-enabled-cloud +C-0067,k8s-audit-logs-enabled-native +C-0166,ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive 
+C-0109,ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root +C-0088,rbac-enabled-cloud +C-0088,rbac-enabled-native +C-0106,ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive +C-0039,list-all-mutating-webhooks +C-0201,pod-security-admission-restricted-applied +C-0126,ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set +C-0026,rule-deny-cronjobs +C-0155,etcd-auto-tls-disabled +C-0092,ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive +C-0257,workload-mounted-pvc +C-0223,ensure_nodeinstancerole_has_right_permissions_for_ecr +C-0238,Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive +C-0232,review-roles-with-aws-iam-authenticator +C-0234,ensure-external-secrets-storage-is-in-use +C-0179,kubelet-hostname-override +C-0219,psp-deny-allowed-capabilities diff --git a/releaseDev/FWName_CID_CName.csv b/releaseDev/FWName_CID_CName.csv new file mode 100644 index 000000000..8acebaa40 --- /dev/null +++ b/releaseDev/FWName_CID_CName.csv @@ -0,0 +1,408 @@ +frameworkName,ControlID,ControlName +AllControls,C-0001,Forbidden Container Registries +AllControls,C-0002,Exec into container +AllControls,C-0004,Resources memory limit and request +AllControls,C-0005,API server insecure port is enabled +AllControls,C-0007,Data Destruction +AllControls,C-0009,Resource limits +AllControls,C-0012,Applications credentials in configuration files +AllControls,C-0013,Non-root containers +AllControls,C-0014,Access Kubernetes dashboard +AllControls,C-0015,List Kubernetes secrets +AllControls,C-0016,Allow privilege escalation +AllControls,C-0017,Immutable container filesystem +AllControls,C-0018,Configured readiness probe +AllControls,C-0020,Mount service principal +AllControls,C-0021,Exposed sensitive interfaces +AllControls,C-0026,Kubernetes CronJob +AllControls,C-0030,Ingress and Egress blocked +AllControls,C-0031,Delete Kubernetes events +AllControls,C-0034,Automatic mapping of service account +AllControls,C-0035,Cluster-admin binding +AllControls,C-0036,Malicious admission controller (validating) +AllControls,C-0038,Host PID/IPC privileges +AllControls,C-0039,Malicious admission controller (mutating) +AllControls,C-0041,HostNetwork access +AllControls,C-0042,SSH server running inside container +AllControls,C-0044,Container hostPort +AllControls,C-0045,Writable hostPath mount +AllControls,C-0046,Insecure capabilities +AllControls,C-0048,HostPath mount +AllControls,C-0049,Network mapping +AllControls,C-0050,Resources CPU limit and request +AllControls,C-0052,Instance Metadata API +AllControls,C-0053,Access container service account +AllControls,C-0054,Cluster internal networking +AllControls,C-0055,Linux hardening +AllControls,C-0056,Configured liveness probe +AllControls,C-0057,Privileged container +AllControls,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. 
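The new releaseDev/FWName_CID_CName.csv introduced above, with the header row frameworkName,ControlID,ControlName, maps every framework in this release to the controls it contains. A minimal reading sketch follows, assuming the file is opened from the releaseDev/ path added by this patch and that the header row is present as shown.

    import csv
    from collections import defaultdict

    # Path as added by this patch; header row: frameworkName,ControlID,ControlName.
    controls_by_framework = defaultdict(list)
    with open("releaseDev/FWName_CID_CName.csv", newline="") as f:
        for row in csv.DictReader(f):
            controls_by_framework[row["frameworkName"]].append(
                (row["ControlID"], row["ControlName"])
            )

    # For example, list every control included in the MITRE framework.
    for control_id, control_name in controls_by_framework["MITRE"]:
        print(control_id, control_name)

Control names that contain commas are quoted in the file, and the csv module honours that quoting, so they parse as single fields.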
+AllControls,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability +AllControls,C-0061,Pods in default namespace +AllControls,C-0062,Sudo in container entrypoint +AllControls,C-0063,Portforwarding privileges +AllControls,C-0065,No impersonation +AllControls,C-0066,Secret/ETCD encryption enabled +AllControls,C-0067,Audit logs enabled +AllControls,C-0068,PSP enabled +AllControls,C-0069,Disable anonymous access to Kubelet service +AllControls,C-0070,Enforce Kubelet client TLS authentication +AllControls,C-0073,Naked PODs +AllControls,C-0074,Containers mounting Docker socket +AllControls,C-0075,Image pull policy on latest tag +AllControls,C-0076,Label usage for resources +AllControls,C-0077,K8s common labels usage +AllControls,C-0078,Images from allowed registry +AllControls,C-0079,CVE-2022-0185-linux-kernel-container-escape +AllControls,C-0081,CVE-2022-24348-argocddirtraversal +AllControls,C-0086,CVE-2022-0492-cgroups-container-escape +AllControls,C-0087,CVE-2022-23648-containerd-fs-escape +AllControls,C-0088,RBAC enabled +AllControls,C-0090,CVE-2022-39328-grafana-auth-bypass +AllControls,C-0091,CVE-2022-47633-kyverno-signature-bypass +AllControls,C-0262,Anonymous access enabled +MITRE,C-0002,Exec into container +MITRE,C-0007,Data Destruction +MITRE,C-0012,Applications credentials in configuration files +MITRE,C-0014,Access Kubernetes dashboard +MITRE,C-0015,List Kubernetes secrets +MITRE,C-0020,Mount service principal +MITRE,C-0021,Exposed sensitive interfaces +MITRE,C-0026,Kubernetes CronJob +MITRE,C-0031,Delete Kubernetes events +MITRE,C-0035,Cluster-admin binding +MITRE,C-0036,Malicious admission controller (validating) +MITRE,C-0037,CoreDNS poisoning +MITRE,C-0039,Malicious admission controller (mutating) +MITRE,C-0042,SSH server running inside container +MITRE,C-0045,Writable hostPath mount +MITRE,C-0048,HostPath mount +MITRE,C-0052,Instance Metadata API +MITRE,C-0053,Access container service account +MITRE,C-0054,Cluster internal networking +MITRE,C-0057,Privileged container +MITRE,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. 
+MITRE,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability +MITRE,C-0066,Secret/ETCD encryption enabled +MITRE,C-0067,Audit logs enabled +MITRE,C-0068,PSP enabled +MITRE,C-0069,Disable anonymous access to Kubelet service +MITRE,C-0070,Enforce Kubelet client TLS authentication +cis-aks-t1.2.0,C-0078,Images from allowed registry +cis-aks-t1.2.0,C-0088,RBAC enabled +cis-aks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root +cis-aks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root +cis-aks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false +cis-aks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow +cis-aks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate +cis-aks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0 +cis-aks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0 +cis-aks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true +cis-aks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true +cis-aks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set +cis-aks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture +cis-aks-t1.2.0,C-0182,Ensure that the --rotate-certificates argument is not set to false +cis-aks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true +cis-aks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required +cis-aks-t1.2.0,C-0186,Minimize access to secrets +cis-aks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles +cis-aks-t1.2.0,C-0188,Minimize access to create pods +cis-aks-t1.2.0,C-0189,Ensure that default service accounts are not actively used +cis-aks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary +cis-aks-t1.2.0,C-0201,Minimize the admission of containers with capabilities assigned +cis-aks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies +cis-aks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined +cis-aks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables +cis-aks-t1.2.0,C-0208,Consider external secret storage +cis-aks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces +cis-aks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers +cis-aks-t1.2.0,C-0212,The default namespace should not be used +cis-aks-t1.2.0,C-0213,Minimize the admission of privileged containers +cis-aks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace +cis-aks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace +cis-aks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace +cis-aks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation +cis-aks-t1.2.0,C-0218,Minimize the admission of root containers +cis-aks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities +cis-aks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive +cis-aks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more restrictive +cis-aks-t1.2.0,C-0239,Prefer using dedicated AKS 
Service Accounts +cis-aks-t1.2.0,C-0240,Ensure Network Policy is Enabled and set as appropriate +cis-aks-t1.2.0,C-0241,Use Azure RBAC for Kubernetes Authorization. +cis-aks-t1.2.0,C-0242,Hostile multi-tenant workloads +cis-aks-t1.2.0,C-0243,Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider +cis-aks-t1.2.0,C-0244,Ensure Kubernetes Secrets are encrypted +cis-aks-t1.2.0,C-0245,Encrypt traffic to HTTPS load balancers with TLS certificates +cis-aks-t1.2.0,C-0247,Restrict Access to the Control Plane Endpoint +cis-aks-t1.2.0,C-0248,Ensure clusters are created with Private Nodes +cis-aks-t1.2.0,C-0249,Restrict untrusted workloads +cis-aks-t1.2.0,C-0250,Minimize cluster access to read-only for Azure Container Registry (ACR) +cis-aks-t1.2.0,C-0251,Minimize user access to Azure Container Registry (ACR) +cis-aks-t1.2.0,C-0252,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled +cis-aks-t1.2.0,C-0254,Enable audit Logs +NSA,C-0002,Exec into container +NSA,C-0005,API server insecure port is enabled +NSA,C-0009,Resource limits +NSA,C-0012,Applications credentials in configuration files +NSA,C-0013,Non-root containers +NSA,C-0016,Allow privilege escalation +NSA,C-0017,Immutable container filesystem +NSA,C-0030,Ingress and Egress blocked +NSA,C-0034,Automatic mapping of service account +NSA,C-0035,Cluster-admin binding +NSA,C-0038,Host PID/IPC privileges +NSA,C-0041,HostNetwork access +NSA,C-0044,Container hostPort +NSA,C-0046,Insecure capabilities +NSA,C-0054,Cluster internal networking +NSA,C-0055,Linux hardening +NSA,C-0057,Privileged container +NSA,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. +NSA,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability +NSA,C-0066,Secret/ETCD encryption enabled +NSA,C-0067,Audit logs enabled +NSA,C-0068,PSP enabled +NSA,C-0069,Disable anonymous access to Kubelet service +NSA,C-0070,Enforce Kubelet client TLS authentication +ArmoBest,C-0001,Forbidden Container Registries +ArmoBest,C-0002,Exec into container +ArmoBest,C-0005,API server insecure port is enabled +ArmoBest,C-0009,Resource limits +ArmoBest,C-0012,Applications credentials in configuration files +ArmoBest,C-0013,Non-root containers +ArmoBest,C-0016,Allow privilege escalation +ArmoBest,C-0017,Immutable container filesystem +ArmoBest,C-0030,Ingress and Egress blocked +ArmoBest,C-0034,Automatic mapping of service account +ArmoBest,C-0035,Cluster-admin binding +ArmoBest,C-0038,Host PID/IPC privileges +ArmoBest,C-0041,HostNetwork access +ArmoBest,C-0044,Container hostPort +ArmoBest,C-0046,Insecure capabilities +ArmoBest,C-0049,Network mapping +ArmoBest,C-0054,Cluster internal networking +ArmoBest,C-0055,Linux hardening +ArmoBest,C-0057,Privileged container +ArmoBest,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. 
+ArmoBest,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability +ArmoBest,C-0061,Pods in default namespace +ArmoBest,C-0062,Sudo in container entrypoint +ArmoBest,C-0063,Portforwarding privileges +ArmoBest,C-0065,No impersonation +ArmoBest,C-0066,Secret/ETCD encryption enabled +ArmoBest,C-0067,Audit logs enabled +ArmoBest,C-0068,PSP enabled +ArmoBest,C-0069,Disable anonymous access to Kubelet service +ArmoBest,C-0070,Enforce Kubelet client TLS authentication +ArmoBest,C-0078,Images from allowed registry +ArmoBest,C-0079,CVE-2022-0185-linux-kernel-container-escape +ArmoBest,C-0081,CVE-2022-24348-argocddirtraversal +ArmoBest,C-0086,CVE-2022-0492-cgroups-container-escape +ArmoBest,C-0087,CVE-2022-23648-containerd-fs-escape +ArmoBest,C-0089,CVE-2022-3172-aggregated-API-server-redirect +ArmoBest,C-0091,CVE-2022-47633-kyverno-signature-bypass +ArmoBest,C-0236,Verify image signature +ArmoBest,C-0237,Check if signature exists +cis-v1.23-t1.0.1,C-0092,Ensure that the API server pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0093,Ensure that the API server pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0094,Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0095,Ensure that the controller manager pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0096,Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0097,Ensure that the scheduler pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0098,Ensure that the etcd pod specification file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0099,Ensure that the etcd pod specification file ownership is set to root:root +cis-v1.23-t1.0.1,C-0100,Ensure that the Container Network Interface file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0101,Ensure that the Container Network Interface file ownership is set to root:root +cis-v1.23-t1.0.1,C-0102,Ensure that the etcd data directory permissions are set to 700 or more restrictive +cis-v1.23-t1.0.1,C-0103,Ensure that the etcd data directory ownership is set to etcd:etcd +cis-v1.23-t1.0.1,C-0104,Ensure that the admin.conf file permissions are set to 600 +cis-v1.23-t1.0.1,C-0105,Ensure that the admin.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0106,Ensure that the scheduler.conf file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0107,Ensure that the scheduler.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0108,Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0109,Ensure that the controller-manager.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0110,Ensure that the Kubernetes PKI directory and file ownership is set to root:root +cis-v1.23-t1.0.1,C-0111,Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0112,Ensure that the Kubernetes PKI key file permissions are set to 600 +cis-v1.23-t1.0.1,C-0113,Ensure that the API Server --anonymous-auth argument is set to false +cis-v1.23-t1.0.1,C-0114,Ensure that the API Server --token-auth-file parameter is not set +cis-v1.23-t1.0.1,C-0115,Ensure that the API Server --DenyServiceExternalIPs is not set +cis-v1.23-t1.0.1,C-0116,Ensure that the API Server --kubelet-client-certificate and 
--kubelet-client-key arguments are set as appropriate +cis-v1.23-t1.0.1,C-0117,Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate +cis-v1.23-t1.0.1,C-0118,Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow +cis-v1.23-t1.0.1,C-0119,Ensure that the API Server --authorization-mode argument includes Node +cis-v1.23-t1.0.1,C-0120,Ensure that the API Server --authorization-mode argument includes RBAC +cis-v1.23-t1.0.1,C-0121,Ensure that the admission control plugin EventRateLimit is set +cis-v1.23-t1.0.1,C-0122,Ensure that the admission control plugin AlwaysAdmit is not set +cis-v1.23-t1.0.1,C-0123,Ensure that the admission control plugin AlwaysPullImages is set +cis-v1.23-t1.0.1,C-0124,Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used +cis-v1.23-t1.0.1,C-0125,Ensure that the admission control plugin ServiceAccount is set +cis-v1.23-t1.0.1,C-0126,Ensure that the admission control plugin NamespaceLifecycle is set +cis-v1.23-t1.0.1,C-0127,Ensure that the admission control plugin NodeRestriction is set +cis-v1.23-t1.0.1,C-0128,Ensure that the API Server --secure-port argument is not set to 0 +cis-v1.23-t1.0.1,C-0129,Ensure that the API Server --profiling argument is set to false +cis-v1.23-t1.0.1,C-0130,Ensure that the API Server --audit-log-path argument is set +cis-v1.23-t1.0.1,C-0131,Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate +cis-v1.23-t1.0.1,C-0132,Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate +cis-v1.23-t1.0.1,C-0133,Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate +cis-v1.23-t1.0.1,C-0134,Ensure that the API Server --request-timeout argument is set as appropriate +cis-v1.23-t1.0.1,C-0135,Ensure that the API Server --service-account-lookup argument is set to true +cis-v1.23-t1.0.1,C-0136,Ensure that the API Server --service-account-key-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0137,Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate +cis-v1.23-t1.0.1,C-0138,Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0139,Ensure that the API Server --client-ca-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0140,Ensure that the API Server --etcd-cafile argument is set as appropriate +cis-v1.23-t1.0.1,C-0141,Ensure that the API Server --encryption-provider-config argument is set as appropriate +cis-v1.23-t1.0.1,C-0142,Ensure that encryption providers are appropriately configured +cis-v1.23-t1.0.1,C-0143,Ensure that the API Server only makes use of Strong Cryptographic Ciphers +cis-v1.23-t1.0.1,C-0144,Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate +cis-v1.23-t1.0.1,C-0145,Ensure that the Controller Manager --profiling argument is set to false +cis-v1.23-t1.0.1,C-0146,Ensure that the Controller Manager --use-service-account-credentials argument is set to true +cis-v1.23-t1.0.1,C-0147,Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0148,Ensure that the Controller Manager --root-ca-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0149,Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true +cis-v1.23-t1.0.1,C-0150,Ensure that the Controller Manager --bind-address 
argument is set to 127.0.0.1 +cis-v1.23-t1.0.1,C-0151,Ensure that the Scheduler --profiling argument is set to false +cis-v1.23-t1.0.1,C-0152,Ensure that the Scheduler --bind-address argument is set to 127.0.0.1 +cis-v1.23-t1.0.1,C-0153,Ensure that the --cert-file and --key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0154,Ensure that the --client-cert-auth argument is set to true +cis-v1.23-t1.0.1,C-0155,Ensure that the --auto-tls argument is not set to true +cis-v1.23-t1.0.1,C-0156,Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0157,Ensure that the --peer-client-cert-auth argument is set to true +cis-v1.23-t1.0.1,C-0158,Ensure that the --peer-auto-tls argument is not set to true +cis-v1.23-t1.0.1,C-0159,Ensure that a unique Certificate Authority is used for etcd +cis-v1.23-t1.0.1,C-0160,Ensure that a minimal audit policy is created +cis-v1.23-t1.0.1,C-0161,Ensure that the audit policy covers key security concerns +cis-v1.23-t1.0.1,C-0162,Ensure that the kubelet service file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0163,Ensure that the kubelet service file ownership is set to root:root +cis-v1.23-t1.0.1,C-0164,If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0165,If proxy kubeconfig file exists ensure ownership is set to root:root +cis-v1.23-t1.0.1,C-0166,Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root +cis-v1.23-t1.0.1,C-0168,Ensure that the certificate authorities file permissions are set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0169,Ensure that the client certificate authorities file ownership is set to root:root +cis-v1.23-t1.0.1,C-0170,If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive +cis-v1.23-t1.0.1,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root +cis-v1.23-t1.0.1,C-0172,Ensure that the --anonymous-auth argument is set to false +cis-v1.23-t1.0.1,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow +cis-v1.23-t1.0.1,C-0174,Ensure that the --client-ca-file argument is set as appropriate +cis-v1.23-t1.0.1,C-0175,Verify that the --read-only-port argument is set to 0 +cis-v1.23-t1.0.1,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0 +cis-v1.23-t1.0.1,C-0177,Ensure that the --protect-kernel-defaults argument is set to true +cis-v1.23-t1.0.1,C-0178,Ensure that the --make-iptables-util-chains argument is set to true +cis-v1.23-t1.0.1,C-0179,Ensure that the --hostname-override argument is not set +cis-v1.23-t1.0.1,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture +cis-v1.23-t1.0.1,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate +cis-v1.23-t1.0.1,C-0182,Ensure that the --rotate-certificates argument is not set to false +cis-v1.23-t1.0.1,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true +cis-v1.23-t1.0.1,C-0184,Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers +cis-v1.23-t1.0.1,C-0185,Ensure that the cluster-admin role is only used where required +cis-v1.23-t1.0.1,C-0186,Minimize access to secrets +cis-v1.23-t1.0.1,C-0187,Minimize wildcard use in Roles 
and ClusterRoles +cis-v1.23-t1.0.1,C-0188,Minimize access to create pods +cis-v1.23-t1.0.1,C-0189,Ensure that default service accounts are not actively used +cis-v1.23-t1.0.1,C-0190,Ensure that Service Account Tokens are only mounted where necessary +cis-v1.23-t1.0.1,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster" +cis-v1.23-t1.0.1,C-0192,Ensure that the cluster has at least one active policy control mechanism in place +cis-v1.23-t1.0.1,C-0193,Minimize the admission of privileged containers +cis-v1.23-t1.0.1,C-0194,Minimize the admission of containers wishing to share the host process ID namespace +cis-v1.23-t1.0.1,C-0195,Minimize the admission of containers wishing to share the host IPC namespace +cis-v1.23-t1.0.1,C-0196,Minimize the admission of containers wishing to share the host network namespace +cis-v1.23-t1.0.1,C-0197,Minimize the admission of containers with allowPrivilegeEscalation +cis-v1.23-t1.0.1,C-0198,Minimize the admission of root containers +cis-v1.23-t1.0.1,C-0199,Minimize the admission of containers with the NET_RAW capability +cis-v1.23-t1.0.1,C-0200,Minimize the admission of containers with added capabilities +cis-v1.23-t1.0.1,C-0201,Minimize the admission of containers with capabilities assigned +cis-v1.23-t1.0.1,C-0202,Minimize the admission of Windows HostProcess Containers +cis-v1.23-t1.0.1,C-0203,Minimize the admission of HostPath volumes +cis-v1.23-t1.0.1,C-0204,Minimize the admission of containers which use HostPorts +cis-v1.23-t1.0.1,C-0205,Ensure that the CNI in use supports Network Policies +cis-v1.23-t1.0.1,C-0206,Ensure that all Namespaces have Network Policies defined +cis-v1.23-t1.0.1,C-0207,Prefer using secrets as files over secrets as environment variables +cis-v1.23-t1.0.1,C-0208,Consider external secret storage +cis-v1.23-t1.0.1,C-0209,Create administrative boundaries between resources using namespaces +cis-v1.23-t1.0.1,C-0210,Ensure that the seccomp profile is set to docker/default in your pod definitions +cis-v1.23-t1.0.1,C-0211,Apply Security Context to Your Pods and Containers +cis-v1.23-t1.0.1,C-0212,The default namespace should not be used +cis-eks-t1.2.0,C-0066,Secret/ETCD encryption enabled +cis-eks-t1.2.0,C-0067,Audit logs enabled +cis-eks-t1.2.0,C-0078,Images from allowed registry +cis-eks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root +cis-eks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root +cis-eks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false +cis-eks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow +cis-eks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate +cis-eks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0 +cis-eks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0 +cis-eks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true +cis-eks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true +cis-eks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set +cis-eks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture +cis-eks-t1.2.0,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate +cis-eks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate 
argument is set to true +cis-eks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required +cis-eks-t1.2.0,C-0186,Minimize access to secrets +cis-eks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles +cis-eks-t1.2.0,C-0188,Minimize access to create pods +cis-eks-t1.2.0,C-0189,Ensure that default service accounts are not actively used +cis-eks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary +cis-eks-t1.2.0,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster" +cis-eks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies +cis-eks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined +cis-eks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables +cis-eks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces +cis-eks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers +cis-eks-t1.2.0,C-0212,The default namespace should not be used +cis-eks-t1.2.0,C-0213,Minimize the admission of privileged containers +cis-eks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace +cis-eks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace +cis-eks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace +cis-eks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation +cis-eks-t1.2.0,C-0218,Minimize the admission of root containers +cis-eks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities +cis-eks-t1.2.0,C-0220,Minimize the admission of containers with capabilities assigned +cis-eks-t1.2.0,C-0221,Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider +cis-eks-t1.2.0,C-0222,Minimize user access to Amazon ECR +cis-eks-t1.2.0,C-0223,Minimize cluster access to read-only for Amazon ECR +cis-eks-t1.2.0,C-0225,Prefer using dedicated EKS Service Accounts +cis-eks-t1.2.0,C-0226,Prefer using a container-optimized OS when possible +cis-eks-t1.2.0,C-0227,Restrict Access to the Control Plane Endpoint +cis-eks-t1.2.0,C-0228,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled +cis-eks-t1.2.0,C-0229,Ensure clusters are created with Private Nodes +cis-eks-t1.2.0,C-0230,Ensure Network Policy is Enabled and set as appropriate +cis-eks-t1.2.0,C-0231,Encrypt traffic to HTTPS load balancers with TLS certificates +cis-eks-t1.2.0,C-0232,Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 +cis-eks-t1.2.0,C-0233,Consider Fargate for running untrusted workloads +cis-eks-t1.2.0,C-0234,Consider external secret storage +cis-eks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive +cis-eks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more restrictive +cis-eks-t1.2.0,C-0242,Hostile multi-tenant workloads +cis-eks-t1.2.0,C-0246,Avoid use of system:masters group +DevOpsBest,C-0004,Resources memory limit and request +DevOpsBest,C-0018,Configured readiness probe +DevOpsBest,C-0044,Container hostPort +DevOpsBest,C-0050,Resources CPU limit and request +DevOpsBest,C-0056,Configured liveness probe +DevOpsBest,C-0061,Pods in default namespace +DevOpsBest,C-0073,Naked PODs +DevOpsBest,C-0074,Containers mounting Docker socket +DevOpsBest,C-0075,Image pull policy on latest tag 
+DevOpsBest,C-0076,Label usage for resources +DevOpsBest,C-0077,K8s common labels usage +DevOpsBest,C-0253,Deprecated Kubernetes image registry +security,C-0009,Resource limits +security,C-0017,Immutable container filesystem +security,C-0256,Exposure to internet +security,C-0259,Workload with credential access +security,C-0258,Workload with ConfigMap access +security,C-0257,Workload with PVC access +security,C-0260,Missing network policy +security,C-0261,ServiceAccount token mounted +security,C-0255,Workload with secret access +security,C-0041,HostNetwork access +security,C-0044,Container hostPort +security,C-0045,Writable hostPath mount +security,C-0046,Insecure capabilities +security,C-0048,HostPath mount +security,C-0211,Apply Security Context to Your Pods and Containers +security,C-0262,Anonymous access enabled diff --git a/releaseDev/allcontrols.json b/releaseDev/allcontrols.json new file mode 100644 index 000000000..a98b5e7fe --- /dev/null +++ b/releaseDev/allcontrols.json @@ -0,0 +1,5496 @@ +{ + "name": "AllControls", + "description": "Contains all the controls from all the frameworks", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Forbidden Container Registries", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial Access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credentials can lead to cluster takeover. Attackers may abuse cloud account credentials or IAM mechanisms to gain access to the cluster\u2019s management layer.", + "remediation": "Limit the registries from which you pull container images", + "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. 
Building images based on untrusted base images can also lead to similar results.", + "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", + "controlID": "C-0001", + "baseScore": 7.0, + "example": "@controls/examples/c001.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-identify-blocklisted-image-registries", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.publicRegistries", + "settings.postureControlInputs.untrustedRegistries" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of the following registries are in use." + } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}" + } + ] + }, + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" + }, + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport 
future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Resources memory limit and request", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "This control identifies all Pods for which the memory limit is not set.", + "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0004", + "example": "@controls/examples/c004.yaml", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limit-and-request", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.memory_request_max", + "settings.postureControlInputs.memory_request_min", + "settings.postureControlInputs.memory_limit_max", + "settings.postureControlInputs.memory_limit_min" + ], + "controlConfigInputs": [ + { + 
"path": "settings.postureControlInputs.memory_request_max", + "name": "memory_request_max", + "description": "Ensure memory max requests are set" + }, + { + "path": "settings.postureControlInputs.memory_request_min", + "name": "memory_request_min", + "description": "Ensure memory min requests are set" + }, + { + "path": "settings.postureControlInputs.memory_limit_max", + "name": "memory_limit_max", + "description": "Ensure memory max limits are set" + }, + { + "path": "settings.postureControlInputs.memory_limit_min", + "name": "memory_limit_min", + "description": "Ensure memory min limits are set" + } + ], + "description": "memory limits and requests are not set.", + "remediation": "Ensure memory limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not have container with memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nrequest_or_limit_memory(container) 
{\n\tcontainer.resources.limits.memory\n\tcontainer.resources.requests.memory\n}\n\n######################################################################################################\n\n# Fails if pod exceeds memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\nis_min_max_exceeded_memory(container) = \"resources.limits.memory\" {\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n} else = \"resouces.requests.memory\" {\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n} else = \"\" {\n\ttrue\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max :=data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, 
memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + } + ] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Data Destruction", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. 
This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-excessive-delete-rights", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can can delete important resources\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga 
:= {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n" + }, + { + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", 
[rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resource-policies", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace has no resource policies defined", + "remediation": "Make sure that you define resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Values", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the Pods running as root or that can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id greater than 999, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + } + ] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-access-dashboard", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n 
wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + }, + { + "name": "rule-access-dashboard-subject-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" + }, + { + "name": "rule-access-dashboard-wl-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ 
+ "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. 
Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can list/get secrets\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n 
clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service 
accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. 
Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If the container's application needs to write into the filesystem, it is recommended to mount secondary filesystems for the specific directories where the application requires write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container can create files, download scripts, and modify the underlying application running in the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n    pod := input[_]\n    pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n    result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n    fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n    wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n    container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n    result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n    fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path :=
get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + } + ] + }, + { + "name": "Configured readiness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
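As a sketch of what this control expects (container name, image, port, and health endpoint are hypothetical), a readiness probe can be declared on each worker container:

apiVersion: v1
kind: Pod
metadata:
  name: readiness-demo        # hypothetical name
spec:
  containers:
  - name: web                 # hypothetical container name
    image: nginx              # illustrative image only
    ports:
    - containerPort: 8080
    readinessProbe:           # the probe this control looks for
      httpGet:
        path: /healthz        # hypothetical health endpoint
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10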
This control finds all the PODs where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-readiness-probe", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Readiness probe is not configured", + "remediation": "Ensure Readiness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Mount service principal", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-mount-potential-credentials-paths", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "relevantCloudProviders": [ + "EKS", + "GKE", + "AKS" + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider 
(eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n    not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n    not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n    result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" + } + ] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and expose services externally.", + "remediation": "Consider blocking external interfaces or protecting them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type NodePort/LoadBalancer pointing to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, Kubernetes dashboard) exists.
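For illustration only, the kind of externally reachable Service this control flags might look like the sketch below (the name is hypothetical and would have to match one of the configured sensitive-interface names):

apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard   # hypothetical; must match a configured interface name
spec:
  type: LoadBalancer           # externally exposed service type checked by this control
  selector:
    app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443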
Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "exposed-sensitive-interfaces", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.servicesNames" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.servicesNames", + "name": "Service names", + "description": "Kubescape will look for the following services that exposes sensitive interfaces of common K8s projects/applications" + } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\tresult := wl_connectedto_service(wl, service)\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tresult := wl_connectedto_service(pod, service)\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod, service]\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n\tresult := 
wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n" + }, + { + "name": "exposed-sensitive-interfaces-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.sensitiveInterfaces" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveInterfaces", + "name": "Sensitive interfaces", + "description": "The following interfaces were seen exploited. Kubescape checks if they are externally exposed."
+ } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == 
wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" + } + ] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rule-deny-cronjobs", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", + "armoBuiltin": true + }, + "ruleLanguage": "rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if it's cronjob", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
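For reference, a minimal default-deny policy of the kind this control expects could look like the following sketch (policy and namespace names are arbitrary); additional policies can then selectively re-allow the traffic each workload actually needs:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all       # arbitrary name
  namespace: my-namespace      # arbitrary namespace
spec:
  podSelector: {}              # selects every pod in the namespace
  policyTypes:                 # listing both types blocks ingress and egress by default
  - Ingress
  - Egress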
", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ingress-and-egress-blocked", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, 
networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" + } + ] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Defense evasion" + ] + } + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-delete-k8s-events", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := 
sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}" + }, + { + "name": "rule-can-delete-k8s-events-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ 
groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v 
mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod 
spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
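For illustration (the subject is hypothetical), this is the kind of binding the control reports:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: example-admin-access    # hypothetical name
subjects:
- kind: User
  name: alice                   # hypothetical subject
  apiGroup: rbac.authorization.k8s.io
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin           # built-in high-privilege role flagged by this control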
", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, 
such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Malicious admission controller (validating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ 
+ "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Malicious admission controller", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
This control identifies all PODs using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": 
[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "Malicious admission controller (mutating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-mutating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Malicious admission controller", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns mutating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those PODs that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "SSH server running inside container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "SSH server that is running inside a container may be used by attackers. 
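As a hypothetical illustration (names are placeholders), the pattern this control looks for is a Service whose selector matches a workload and which exposes an SSH port such as 22 or 2222:

apiVersion: v1
kind: Service
metadata:
  name: legacy-ssh                      # placeholder
spec:
  selector:
    app: legacy                         # must match the workload's pod labels
  ports:
  - port: 22                            # SSH port that triggers the alert
    targetPort: 22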
If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-ssh-to-pod", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := 
service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n" + }, + { + "name": "rule-can-ssh-to-pod-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH 
services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": 
[ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": 
path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := 
is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." 
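For illustration (a hypothetical manifest; the exact set of flagged capabilities comes from the configurable insecureCapabilities input), a container that adds a typically dangerous capability looks like this:

apiVersion: v1
kind: Pod
metadata:
  name: cap-demo                        # placeholder
spec:
  containers:
  - name: app
    image: registry.example/app:latest  # placeholder image
    securityContext:
      capabilities:
        add: ["SYS_ADMIN"]              # flagged if present in the insecureCapabilities list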
+ } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the PODs using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n 
startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Network mapping", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Resources CPU limit and request", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control 
identifies all Pods for which the CPU limit is not set.", + "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0050", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-limit-and-request", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.cpu_request_max", + "settings.postureControlInputs.cpu_request_min", + "settings.postureControlInputs.cpu_limit_min", + "settings.postureControlInputs.cpu_limit_max" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.cpu_request_max", + "name": "cpu_request_max", + "description": "Ensure CPU max requests are set" + }, + { + "path": "settings.postureControlInputs.cpu_request_min", + "name": "cpu_request_min", + "description": "Ensure CPU min requests are set" + }, + { + "path": "settings.postureControlInputs.cpu_limit_max", + "name": "cpu_limit_max", + "description": "Ensure CPU max limits are set" + }, + { + "path": "settings.postureControlInputs.cpu_limit_min", + "name": "cpu_limit_min", + "description": "Ensure CPU min limits are set" + } + ], + "description": "CPU limits and requests are not set.", + "remediation": "Ensure CPU limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not have container with CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n###################################################################################################################\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n \tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n#################################################################################################################3\n\nrequest_or_limit_cpu(container) 
{\n\tcontainer.resources.limits.cpu\n\tcontainer.resources.requests.cpu\n}\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resouces.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + } + ] + }, + { + "name": "Instance Metadata API", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Discovery", + "Impact - service access" + ] + } + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
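For context on the resources-cpu-limit-and-request rule that closes just above: it only passes containers that declare both a CPU request and a CPU limit, and it compares the declared values against the configured cpu_request_min/max and cpu_limit_min/max inputs. A minimal sketch of a compliant container spec, with illustrative names, image and values, might look like:

apiVersion: v1
kind: Pod
metadata:
  name: cpu-bounded          # hypothetical name
spec:
  containers:
  - name: app                # hypothetical container
    image: registry.example.com/app:1.0   # illustrative image
    resources:
      requests:
        cpu: 250m            # satisfies the request check
      limits:
        cpu: 500m            # satisfies the limit check; keep within the configured min/max bounds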
This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "instance-metadata-api-access", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "cloudProviderInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Checks if there is access from the nodes to cloud providers instance metadata services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" + } + ] + }, + { + "name": "Access container service account", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
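The access-container-service-account rules below skip workloads for which service account token automount is disabled, which is also the practical remediation for pods that do not need API access. A minimal sketch, with illustrative names:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                          # hypothetical service account
automountServiceAccountToken: false     # pods using this SA get no token by default
---
apiVersion: v1
kind: Pod
metadata:
  name: app                             # hypothetical pod
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false   # per-pod override; either setting satisfies the rule's auto-mount check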
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "access-container-service-account", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the 
cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role 
:= roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := 
rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n" + }, + { + "name": "access-container-service-account-v1", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
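Because the control above only verifies that each namespace has at least one NetworkPolicy defined, a default-deny policy is a common way to satisfy it. A minimal sketch with an illustrative namespace name:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all     # hypothetical name
  namespace: my-namespace    # replace with each namespace flagged by the control
spec:
  podSelector: {}            # applies to every pod in the namespace
  policyTypes:
  - Ingress
  - Egress                   # blocks all traffic until explicit allow rules are added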
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n  some i\n  list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Linux hardening", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of container and pod. 
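A minimal sketch of a pod spec this control accepts, assuming that defining any one of Seccomp, SELinux, AppArmor or dropped capabilities is enough (names and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: hardened-app         # hypothetical name
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault   # pod-level seccomp profile
  containers:
  - name: app
    image: registry.example.com/app:1.0   # illustrative image
    securityContext:
      capabilities:
        drop:
        - ALL                # container-level hardening: drop Linux capabilities the workload does not need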
If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "linux-hardening", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot 
obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + } + ] + }, + { + "name": "Configured liveness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "controlID": "C-0056", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-liveness-probe", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Liveness probe is not configured", + "remediation": "Ensure Liveness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n    pod := input[_]\n    pod.kind == \"Pod\"\n    container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n    wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n    container := wl.spec.template.spec.containers[i]\n    not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n    not container.livenessProbe\n\tfix_path := {\"path\": 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only 
securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
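Concretely, the rule that follows reports the subPath field of any volumeMount when the node's kubelet version falls in the affected ranges; the flagged pattern looks roughly like this (names and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo         # hypothetical name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0   # illustrative image
    volumeMounts:
    - name: data
      mountPath: /data
      subPath: app           # this is the failed path the rule reports on vulnerable kubelet versions
  volumes:
  - name: data
    emptyDir: {}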
", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to 
CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Pods in default namespace", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", + "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", + "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the PODs running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
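The sudo-in-container-entrypoint rule that follows simply looks for the string 'sudo' in the container command. A hedged sketch of the remediation is to drop sudo from the entrypoint and grant any required privileges through the securityContext instead (names and values are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: no-sudo              # hypothetical name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0   # illustrative image
    command: ["/entrypoint.sh"]   # no 'sudo' prefix, so the rule finds nothing to flag
    securityContext:
      runAsUser: 1000             # run as non-root; add specific capabilities only where genuinely required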
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "sudo-in-container-entrypoint", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, beggining_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster 
or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-portforward", + "attributes": { + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; 
rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"get\")\n}\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/portforward\")\n}\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/*\")\n}\ncanForwardToPodResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "rule-can-portforward-v1", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "No impersonation", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-impersonate-users-groups", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"impersonate\")\n}\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"*\")\n}\n\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"users\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"serviceaccounts\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"groups\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"uids\")\n}\n\ncanImpersonateResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, 
\"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
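To make the EKS branch of the audit-log rule above more concrete: the helper requires every control-plane log type to appear in an enabled ClusterLogging entry. A standalone sketch under made-up data (the package name and the cluster_logging value are illustrative only):

package audit_logs_example

import future.keywords.every
import future.keywords.in

# The five control-plane log types the rule above expects to be enabled.
logging_types := {"api", "audit", "authenticator", "controllerManager", "scheduler"}

# Made-up ClusterLogging payload with only two types switched on.
cluster_logging := [{"Enabled": true, "Types": ["api", "audit"]}]

all_enabled {
	every type in logging_types {
		some setup in cluster_logging
		setup.Enabled == true
		type in setup.Types
	}
}

With only api and audit enabled, all_enabled stays undefined, which mirrors how the rule's "not all_auditlogs_enabled(...)" condition fires and the control reports the cluster.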
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Naked PODs", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if PODs may lead to a configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have corresponding parental object.", + "remediation": "Create necessary Deployment object for every POD making any POD a first class citizen in your IaC architecture.", + "long_description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if PODs may lead to a configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have corresponding parental object.", + "test": "Test if PODs are not associated with Deployment, ReplicaSet etc. If not, fail.", + "controlID": "C-0073", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "naked-pods", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", + "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. 
Example command: kubectl create deployment nginx-depl --image=nginx:1.19", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Containers mounting Docker socket", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", + "remediation": "Remove docker socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "containers-mounting-docker-socket", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Check hostpath. 
If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n" + } + ] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all PODs with latest tag that have ImagePullSecret not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. 
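As a quick check of the containers-mounting-docker-socket rule above, a hypothetical opa test (the Pod and its hostPath volume are made up; assumes the rule file is loaded alongside):

package armo_builtins

# Made-up Pod mounting the Docker socket via hostPath; the rule above
# should flag spec.volumes[0].hostPath.path.
test_docker_socket_hostpath_is_flagged {
	pod := {
		"kind": "Pod",
		"metadata": {"name": "docker-client", "namespace": "default"},
		"spec": {
			"containers": [{"name": "main", "image": "alpine"}],
			"volumes": [{"name": "dock", "hostPath": {"path": "/var/run/docker.sock"}}]
		}
	}
	count(deny) > 0 with input as [pod]
}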
Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all PODs with latest tag that have ImagePullSecret not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latests too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "image-pull-policy-is-not-set-to-always", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" + } + ] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined, this is a configurable control. 
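To illustrate the latest-tag rule above, a hypothetical opa test in which a container uses an explicit :latest tag together with imagePullPolicy IfNotPresent (the Pod is made up; assumes the rule file is loaded alongside):

package armo_builtins

# Made-up Pod: ':latest' plus IfNotPresent should be reported, while the
# same image with imagePullPolicy 'Always' would pass.
test_latest_tag_with_ifnotpresent_is_flagged {
	pod := {
		"kind": "Pod",
		"metadata": {"name": "web", "namespace": "default"},
		"spec": {"containers": [{
			"name": "web",
			"image": "nginx:latest",
			"imagePullPolicy": "IfNotPresent"
		}]}
	}
	count(deny) > 0 with input as [pod]
}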
Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "label-usage-for-resources", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.recommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.recommendedLabels", + "name": "Recommended Labels", + "description": "Kubescape checks that workloads have at least one of the following labels." + } + ], + "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n \n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, 
podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n" + } + ] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "K8s common labels usage", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.k8sRecommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.k8sRecommendedLabels", + "name": "Kubernetes Recommended Labels", + "description": "Kubescape checks that workloads have at least one of the following kubernetes recommended labels." 
+ } + ], + "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := 
recommended_labels[_]\n\tlabels[recommended_label]\n}\n" + } + ] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows users to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. Users should list all the approved repositories in the parameters of this control so that any potentially dangerous image can be identified.", + "test": "Checks if the image is from an allowed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + } + ] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. 
This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-0185", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "LinuxKernelVariables" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n kernel_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", node.status.nodeInfo.kernelVersion, -1)\n kernelVersion := kernel_version_match[0][0]\n \n kernel_version_arr := split(kernelVersion, \".\")\n to_number(kernel_version_arr[0]) == 5\n to_number(kernel_version_arr[1]) >= 1\n to_number(kernel_version_arr[1]) <= 16\n to_number(kernel_version_arr[2]) < 2 \n \n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": 
\"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-24348", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := 
{\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-0492-cgroups-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", + "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. This control identifies all the resources that don't deploy niether AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. 
In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", + "controlID": "C-0086", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-0492", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Case 1: \n# -\tContainer runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND\n# -\tNo AppArmor , AND\n# -\tNo SELinux, AND\n# -\tNo Seccomp\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\t\n\t# Path to send\n\tbeggining_of_path := \"spec\"\n\t\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec\"\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n # If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\n\tpod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n \n \t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################\n# Case 2: \n# - Container has CAP_DAC_OVERRIDE capability, AND\n# - No AppArmor, AND\n# - No SELinux\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.\"\n\t\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec.\"\n\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n \n pod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\n \tresult := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\nis_cap_sys_admin(container, beggining_of_path) {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"SYS_ADMIN\"\n}\n\nisCAP_DAC_OVERRIDE(container, beggining_of_path, i) = path {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"DAC_OVERRIDE\"\n path = sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) \n}\n\n\n\n#################################################################################\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n\n\n\n\n#################################################################################\n\n# Check if appArmor or SELinux or seccompProfile is used\n# Fails if none of them is used\nis_no_SELinux_No_AppArmor_Pod(pod){\n not pod.spec.securityContext.seLinuxOptions\n\tannotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tnot count(annotations) > 0\n}\n\nis_no_SELinux_container(container){\n not container.securityContext.seLinuxOptions\n}\n\nis_no_seccomp_pod(pod) {\n not pod.spec.securityContext.seccompProfile\n}\n\nis_no_Seccomp_Container(container) {\n not container.securityContext.seccompProfile\n}\n\n\n\n\n\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n\n# Checking for non-root and allowPrivilegeEscalation enabled\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": 
[{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.containers[container_ndx].runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", 
\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n" + } + ] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation", + "Impact - Data access in container" + ] + } + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-23648", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 
1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" + } + ] + }, + { + "name": "RBAC enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access", + "Privilege escalation" + ] + } + ] + }, + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rbac-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" + }, + { + "name": "rbac-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "armoBuiltin": true, + 
"controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", + "test": "This control test for vulnerable versions of Grafana (between 9.2 and 9.2.3)", + "controlID": "C-0090", + "baseScore": 9.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-39328", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a 
malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to 1.8.5 or above", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno. It enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", + "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-47633", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. 
Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "anonymous-access-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } 
+ ], + "ruleDependencies": [], + "description": "Fails in case anonymous access is enabled on the cluster", + "remediation": "Disable anonymous access by passing the --anonymous-auth=false flag to the kube-apiserver component, or if it's a managed cluster, you can remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n\n isAnonymous(rolebinding)\n\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:anonymous\"\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:unauthenticated\"\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0001", + "C-0002", + "C-0004", + "C-0005", + "C-0007", + "C-0009", + "C-0012", + "C-0013", + "C-0014", + "C-0015", + "C-0016", + "C-0017", + "C-0018", + "C-0020", + "C-0021", + "C-0026", + "C-0030", + "C-0031", + "C-0034", + "C-0035", + "C-0036", + "C-0038", + "C-0039", + "C-0041", + "C-0042", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0049", + "C-0050", + "C-0052", + "C-0053", + "C-0054", + "C-0055", + "C-0056", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0078", + "C-0079", + "C-0081", + "C-0086", + "C-0087", + "C-0088", + "C-0090", + "C-0091", + "C-0262" + ] +} \ No newline at end of file diff --git a/releaseDev/armobest.json b/releaseDev/armobest.json new file mode 100644 index 000000000..d3894ca24 --- /dev/null +++ b/releaseDev/armobest.json @@ -0,0 +1,3358 @@ +{ + "name": "ArmoBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Forbidden Container Registries", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial Access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster\u2019s management layer.", + "remediation": "Limit the registries from which you pull container images from", + "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. 
Building images based on untrusted base images can also lead to similar results.", + "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", + "controlID": "C-0001", + "baseScore": 7.0, + "example": "@controls/examples/c001.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-identify-blocklisted-image-registries", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.publicRegistries", + "settings.postureControlInputs.untrustedRegistries" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of the following registries are in use." + } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}" + } + ] + }, + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" + }, + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport 
future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
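For reference, a compliant kube-apiserver static Pod manifest would carry the flag this control's remediation asks for. The fragment below is only an illustrative sketch: the image tag, the surrounding flags, and the manifest location (`/etc/kubernetes/manifests/kube-apiserver.yaml`) are assumptions, and on recent Kubernetes versions the `--insecure-port` flag has been removed entirely; the point is simply that, where the flag exists, it should be set to 0.

```json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "kube-apiserver",
    "namespace": "kube-system"
  },
  "spec": {
    "containers": [
      {
        "name": "kube-apiserver",
        "image": "registry.k8s.io/kube-apiserver:v1.22.17",
        "command": [
          "kube-apiserver",
          "--secure-port=6443",
          "--insecure-port=0"
        ]
      }
    ]
  }
}
```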
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
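To make the expectation concrete, here is a minimal sketch of a Pod whose single container defines both CPU and memory limits, which is what this control's test looks for; the pod name, image, and the specific request/limit values are placeholders chosen for illustration.

```json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "limited-pod"
  },
  "spec": {
    "containers": [
      {
        "name": "app",
        "image": "nginx:1.25",
        "resources": {
          "requests": { "cpu": "100m", "memory": "128Mi" },
          "limits": { "cpu": "500m", "memory": "256Mi" }
        }
      }
    ]
  }
}
```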
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resource-policies", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace has no resource policies defined", + "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Values", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
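For orientation, a minimal sketch of a Pod that satisfies the checks described here might look as follows; the pod name, image, and the concrete user and group IDs are placeholders, the requirements being a user ID of 1000 or higher (or `runAsNonRoot: true`) and `allowPrivilegeEscalation` set to false.

```json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "nonroot-pod"
  },
  "spec": {
    "securityContext": {
      "runAsNonRoot": true,
      "runAsUser": 1000,
      "runAsGroup": 3000
    },
    "containers": [
      {
        "name": "app",
        "image": "registry.example.com/my-app:1.0.0",
        "securityContext": {
          "allowPrivilegeEscalation": false
        }
      }
    ]
  }
}
```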
This control identifies all the Pods running as root or that can escalate to root.", +            "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", +            "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", +            "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", +            "controlID": "C-0013", +            "baseScore": 6.0, +            "example": "@controls/examples/c013.yaml", +            "scanningScope": { +                "matches": [ +                    "cluster", +                    "file" +                ] +            }, +            "rules": [ +                { +                    "name": "non-root-containers", +                    "attributes": { +                        "armoBuiltin": true +                    }, +                    "ruleLanguage": "Rego", +                    "match": [ +                        { +                            "apiGroups": [ +                                "" +                            ], +                            "apiVersions": [ +                                "v1" +                            ], +                            "resources": [ +                                "Pod" +                            ] +                        }, +                        { +                            "apiGroups": [ +                                "apps" +                            ], +                            "apiVersions": [ +                                "v1" +                            ], +                            "resources": [ +                                "Deployment", +                                "ReplicaSet", +                                "DaemonSet", +                                "StatefulSet" +                            ] +                        }, +                        { +                            "apiGroups": [ +                                "batch" +                            ], +                            "apiVersions": [ +                                "*" +                            ], +                            "resources": [ +                                "Job", +                                "CronJob" +                            ] +                        } +                    ], +                    "ruleDependencies": [], +                    "description": "fails if container can run as root", +                    "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id greater than 999, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
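A hypothetical workload that satisfies this check only needs the flag set explicitly at the container level, as in the sketch below; the Deployment name, labels, and image are placeholders. The Rego that follows additionally consults PodSecurityPolicy objects when any are present in the cluster.

```json
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "name": "web"
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": { "app": "web" }
    },
    "template": {
      "metadata": {
        "labels": { "app": "web" }
      },
      "spec": {
        "containers": [
          {
            "name": "web",
            "image": "registry.example.com/web:1.0.0",
            "securityContext": {
              "allowPrivilegeEscalation": false
            }
          }
        ]
      }
    }
  }
}
```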
", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not 
container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
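As a sketch of the remediation (a read-only root filesystem plus a writable mount only where the application actually needs to write), a compliant Pod could look like the following; the `/tmp` emptyDir mount and all names are illustrative assumptions.

```json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "readonly-pod"
  },
  "spec": {
    "containers": [
      {
        "name": "app",
        "image": "registry.example.com/app:1.0.0",
        "securityContext": {
          "readOnlyRootFilesystem": true
        },
        "volumeMounts": [
          { "name": "tmp", "mountPath": "/tmp" }
        ]
      }
    ],
    "volumes": [
      { "name": "tmp", "emptyDir": {} }
    ]
  }
}
```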
", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + } + ] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
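As an illustrative sketch (policy name and namespace are hypothetical), a NetworkPolicy that covers both traffic directions for every Pod in a namespace could look like:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all          # hypothetical name
  namespace: demo                 # hypothetical namespace
spec:
  podSelector: {}                 # empty selector matches every Pod in the namespace
  policyTypes:                    # both directions must be listed for a policy to count here
  - Ingress
  - Egress

Additional, more permissive policies can then be layered on top for the specific connections each Pod needs.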
", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ingress-and-egress-blocked", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, 
networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" + } + ] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. 
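For illustration (resource names and image are hypothetical placeholders), disabling automount at both the ServiceAccount and Pod level could look like:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                          # hypothetical name
automountServiceAccountToken: false     # disable automount at the ServiceAccount level
---
apiVersion: v1
kind: Pod
metadata:
  name: app                             # hypothetical name
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false   # Pod-level setting, takes precedence
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image

Per the remediation above, the Pod-level field takes precedence over the ServiceAccount-level one.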
Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) 
{\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n 
}\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := 
sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. 
Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n 
path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." 
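As a hedged illustration (names and image are placeholders, and this assumes NET_BIND_SERVICE is not part of the configured insecureCapabilities list), a container that drops everything and adds back only a narrowly scoped capability could look like:

apiVersion: v1
kind: Pod
metadata:
  name: scoped-capabilities       # hypothetical name
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    securityContext:
      capabilities:
        drop: ["ALL"]
        add: ["NET_BIND_SERVICE"]   # assumed not to be in the configured insecureCapabilities list

Only the entries under capabilities.add are compared against the configured blacklist.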
+ } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "Network mapping", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Linux hardening", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
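For illustration only (pod name and image are hypothetical placeholders), a container that defines several of these hardening options might look like:

apiVersion: v1
kind: Pod
metadata:
  name: hardened-pod              # hypothetical name
  annotations:
    container.apparmor.security.beta.kubernetes.io/app: runtime/default   # AppArmor profile; key suffix must match the container name
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    securityContext:
      seccompProfile:
        type: RuntimeDefault      # seccomp
      capabilities:
        drop: ["ALL"]             # drop Linux capabilities the workload does not need

Defining any one of seccomp, SELinux options, AppArmor or dropped capabilities at the Pod or container level is enough to satisfy this check.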
If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "linux-hardening", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot 
obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := 
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
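(Aside, purely for illustration and not part of the control metadata: the kind of manifest this control flags is a Pod whose container mounts a volume with subPath; every name in the sketch below is hypothetical.)
apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo              # hypothetical name, for illustration only
spec:
  containers:
  - name: app
    image: busybox
    volumeMounts:
    - name: data
      mountPath: /data
      subPath: logs               # the subPath/subPathExpr usage this control looks for
  volumes:
  - name: data
    emptyDir: {}
(On a node running one of the affected kubelet versions, a workload like this would be reported.)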
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + 
"compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Pods in default namespace", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", + "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", + "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the PODs running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "sudo-in-container-entrypoint", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, beggining_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster 
or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit the \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have the relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods, i.e. whether they have access to the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-portforward", + "attributes": { + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; 
rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"get\")\n}\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/portforward\")\n}\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/*\")\n}\ncanForwardToPodResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "rule-can-portforward-v1", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "No impersonation", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-impersonate-users-groups", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"impersonate\")\n}\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"*\")\n}\n\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"users\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"serviceaccounts\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"groups\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"uids\")\n}\n\ncanImpersonateResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, 
\"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + } + ] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. 
This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-0185", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "LinuxKernelVariables" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n kernel_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", node.status.nodeInfo.kernelVersion, -1)\n kernelVersion := kernel_version_match[0][0]\n \n kernel_version_arr := split(kernelVersion, \".\")\n to_number(kernel_version_arr[0]) == 5\n to_number(kernel_version_arr[1]) >= 1\n to_number(kernel_version_arr[1]) <= 16\n to_number(kernel_version_arr[2]) < 2 \n \n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": 
\"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-24348", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := 
{\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "name": "CVE-2022-0492-cgroups-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", + "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. This control identifies all the resources that don't deploy niether AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. 
In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", + "controlID": "C-0086", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-0492", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Case 1: \n# -\tContainer runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND\n# -\tNo AppArmor , AND\n# -\tNo SELinux, AND\n# -\tNo Seccomp\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\t\n\t# Path to send\n\tbeggining_of_path := \"spec\"\n\t\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec\"\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n # If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\n\tpod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n \n \t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################\n# Case 2: \n# - Container has CAP_DAC_OVERRIDE capability, AND\n# - No AppArmor, AND\n# - No SELinux\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.\"\n\t\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec.\"\n\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n \n pod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\n \tresult := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\nis_cap_sys_admin(container, beggining_of_path) {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"SYS_ADMIN\"\n}\n\nisCAP_DAC_OVERRIDE(container, beggining_of_path, i) = path {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"DAC_OVERRIDE\"\n path = sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) \n}\n\n\n\n#################################################################################\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n\n\n\n\n#################################################################################\n\n# Check if appArmor or SELinux or seccompProfile is used\n# Fails if none of them is used\nis_no_SELinux_No_AppArmor_Pod(pod){\n not pod.spec.securityContext.seLinuxOptions\n\tannotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tnot count(annotations) > 0\n}\n\nis_no_SELinux_container(container){\n not container.securityContext.seLinuxOptions\n}\n\nis_no_seccomp_pod(pod) {\n not pod.spec.securityContext.seccompProfile\n}\n\nis_no_Seccomp_Container(container) {\n not container.securityContext.seccompProfile\n}\n\n\n\n\n\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n\n# Checking for non-root and allowPrivilegeEscalation enabled\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": 
[{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.containers[container_ndx].runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", 
\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n" + } + ] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation", + "Impact - Data access in container" + ] + } + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-23648", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 
1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" + } + ] + }, + { + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", + "controlID": "C-0089", + "baseScore": 3.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "CVE-2022-3172", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apiregistration.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "APIService" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "apiserverinfo.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", + "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" + } + ] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, 
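
For reference, the version gate in the CVE-2022-3172 rule above reduces the reported gitVersion to a bare semver string and tests it against the affected windows. A minimal sketch of that comparison, assuming the same fixed versions listed in the remediation:

```rego
package example

import future.keywords.if

# Strip the leading "v" and any build suffix, e.g. "v1.24.3-eks-abc" -> "1.24.3".
normalized_version(git_version) := v if {
	v := replace(split(git_version, "-")[0], "v", "")
	semver.is_valid(v)
}

# One of the affected windows: 1.24.0 <= version <= 1.24.4 (fixed in v1.24.5).
affected(version) if {
	semver.compare(version, "1.24.0") >= 0
	semver.compare(version, "1.24.4") <= 0
}
```

Here affected(normalized_version("v1.24.3-eks-abc")) evaluates to true, while v1.24.5 and later fall outside the window.
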
+ "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Grafana to 9.2.4 or above", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was pull image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", + "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "CVE-2022-47633", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + } + ] + }, + { + "controlID": "C-0236", + "name": "Verify image signature", + "description": "Verifies the signature of each image with given public keys", + "long_description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "manual_test": "", + 
"references": [], + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "verify-image-signature", + "attributes": { + "armoBuiltin": true, + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "ruleQuery": "armo_builtins", + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.trustedCosignPublicKeys", + "name": "Trusted Cosign public keys", + "description": "Trusted Cosign public keys" + } + ], + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0237", + "name": "Check if signature exists", + "description": "Ensures that all images contain some signature", + "long_description": "Verifies that each image is signed", + "remediation": "Replace the image with a signed image", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + 
"default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "has-image-signature", + "attributes": { + "armoBuiltin": true, + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0001", + "C-0002", + "C-0005", + "C-0009", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0049", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0078", + "C-0079", + "C-0081", + "C-0086", + "C-0087", + "C-0089", + "C-0091", + "C-0236", + "C-0237" + ] +} \ No newline at end of file diff --git a/releaseDev/attack_tracks.json b/releaseDev/attack_tracks.json new file mode 100644 index 000000000..487ad92b4 --- /dev/null +++ b/releaseDev/attack_tracks.json @@ -0,0 +1,59 @@ +[ + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "workload-external-track" + }, + "spec": { + "version": null, + "data": { + "name": "Workload Exposure", + "subSteps": [ + { + "name": 
"Vulnerable Image", + "checksVulnerabilities": true, + "subSteps": [ + { + "name": "Data Access" + }, + { + "name": "Secret Access" + }, + { + "name": "Credential access" + }, + { + "name": "Potential Node exposure" + }, + { + "name": "Persistence" + }, + { + "name": "Network" + } + ] + } + ] + } + } + }, + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "service-destruction" + }, + "spec": { + "version": null, + "data": { + "name": "Workload Exposure", + "subSteps": [ + { + "name": "Service Destruction" + } + ] + } + } + } +] \ No newline at end of file diff --git a/releaseDev/cis-aks-t1.2.0.json b/releaseDev/cis-aks-t1.2.0.json new file mode 100644 index 000000000..2f084c382 --- /dev/null +++ b/releaseDev/cis-aks-t1.2.0.json @@ -0,0 +1,4072 @@ +{ + "name": "cis-aks-t1.2.0", + "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", + "attributes": { + "version": "v1.2.0", + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Master (Control Plane) Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0254" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0175", + "C-0179", + "C-0182", + "C-0173", + "C-0174", + "C-0176", + "C-0177", + "C-0178", + "C-0180", + "C-0183" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0201", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219" + ] + }, + "3": { + "name": "Azure Policy / OPA", + "id": "4.3", + "controlsIDs": [] + }, + "4": { + "name": "CNI Plugin", + "id": "4.4", + "controlsIDs": [ + "C-0206", + "C-0205" + ] + }, + "5": { + "name": "Secrets Management", + "id": "4.5", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "6": { + "name": "Extensible Admission Control", + "id": "4.6", + "controlsIDs": [] + }, + "7": { + "name": "General Policies", + "id": "4.7", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0243", + "C-0250", + "C-0251" + ] + }, + "2": { + "name": "Access and identity options for Azure Kubernetes Service (AKS)", + "id": "5.2", + "controlsIDs": [ + "C-0239", + "C-0241" + ] + }, + "3": { + "name": "Key Management Service (KMS)", + "id": "5.3", + "controlsIDs": [ + "C-0244" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0240", + "C-0245", + "C-0247", + "C-0248", + "C-0252" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0088" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0242", + "C-0249" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.1.4 Minimize 
Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Use approved container registries.", + "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", + "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + } + ], + "references": [ + "\n\n \n\n " + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." 
+ }, + { + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access", + "Privilege escalation" + ] + } + ] + }, + "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rbac-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" + }, + { + "name": "rbac-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" + } + ], + "references": [ + "\n\n " + ] + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
Verify that the ownership is set to `root:root`", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": 
obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... \"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": {\"webhook\": { 
\"enabled\": is set to true`.\n\n If the `\"authentication\": {\"mode\": {` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `\"authentication\": {\"mode\": {` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. 
Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. 
x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\nreadOnlyPort to 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "read-only-port-enabled-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are 
protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": 
{\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of 
`\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not 
set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above 
command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-ip-tables", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to 
true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. 
The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-hostname-override", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-event-qps", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to an appropriate level or if using a config file set the eventRecordQPS property to a value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host 
sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0182", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", + "references": [ + "\n\n \n\n \n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-certificates", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --rotate-certificates argument is not set to false.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running 
kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" + } + ] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. 
When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "cluster-admin-role", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, 
verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get 
secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + 
"name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-create-pod", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := 
[sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "automount-default-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + 
"description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "namespace-without-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following 
namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = 
[paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "controlID": "C-0201", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace has enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0205", + "name": "CIS-4.4.1 Ensure latest CNI version is used", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "manual_test": "Ensure CNI plugin supports network policies.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None.", + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + } + ] + }, + { + "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-secrets-in-env-var", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": 
[],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "CIS-4.5.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "external-secret-storage", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" + } + ] + }, + { + "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "\n\n \n\n \n\n ." 
+ ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "list-all-namespaces", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. 
For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := 
input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + 
"ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + }, + { + "name": "non-root-containers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, 
\"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + 
"apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", 
path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, 
path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first 
if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" + }, + { + "name": "set-fsgroup-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-systctls-params", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.systctls is not set.", + "remediation": "Set securityContext.systctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not 
set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB 
###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "CIS-4.7.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": 
\"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-rbac-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-core1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap", + "Endpoints", + "LimitRange", + "PersistentVolumeClaim", + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-core2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController", + "ResourceQuota", + "ServiceAccount", + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package 
armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-other1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ControllerRevision" + ] + }, + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + }, + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + }, + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-other2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress", + "NetworkPolicy" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + }, + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-event-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "events.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = 
[failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n as an alternative AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "psp-deny-privileged-container", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A 
container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostpid", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether 
privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostipc", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0216", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostnetwork", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + 
"description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0217", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowprivilegeescalation", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath 
:= \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0218", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-root-container", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" + } + ] + }, + { + "controlID": "C-0219", + "name": "CIS-4.2.7 Minimize the admission of 
containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowed-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0239", + "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-default-service-accounts-has-only-default-roles", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. \ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0240", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Network Policy requires the Network Policy add-on. 
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-cni-enabled-aks", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" + } + ] + }, + { + "controlID": "C-0241", + "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. 
Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", + "remediation": "Set Azure RBAC as access system.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-azure-rbac-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", + "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" + } + ] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. 
For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-hostile-multitenant-workloads", + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": [], + "configInputs": [], + "controlConfigInputs": [], + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", + "remediation": "Use physically isolated clusters", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" + } + ] + }, + { + "controlID": "C-0243", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. 
Or, a network issue might prevent you from connecting to the registry.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "Enable Azure Defender image scanning. Command: az aks update --enable-defender --resource-group --name ", + "ruleQuery": "armo_builtin", + "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" + } + ] + }, + { + "controlID": "C-0244", + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. 
Additionally, organizations have various options to closely manage encryption or encryption keys.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + } + ] + }, + { + "controlID": "C-0245", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := 
\"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" + } + ] + }, + { + "controlID": "C-0247", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. 
Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", + "default_value": "By default, Endpoint Private Access is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "restrict-access-to-the-control-plane-endpoint", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" + } + ] + }, + { + "controlID": "C-0248", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-clusters-are-created-with-private-nodes", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" + } + ] + }, + { + "controlID": "C-0249", + "name": "CIS-5.6.1 Restrict untrusted workloads", + "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. 
Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review" + }, + "baseScore": 5, + "impact_statement": "", + "default_value": "ACI is not a default component of the AKS", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-manual", + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + } + ] + }, + { + "controlID": "C-0250", + "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. 
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-service-principle-has-read-only-permissions", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0251", + "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. 
For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-role-definitions-in-acr", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0252", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" + } + ] + }, + { + "controlID": "C-0254", + "name": "CIS-2.1.1 Enable audit Logs", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. 
On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", + "default_value": "By default, cluster control plane logs aren't sent to be Logged.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-manual", + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + } + ] + } + ], + "ControlsIDs": [ + "C-0078", + "C-0088", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0201", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + 
"C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0235", + "C-0238", + "C-0239", + "C-0240", + "C-0241", + "C-0242", + "C-0243", + "C-0244", + "C-0245", + "C-0247", + "C-0248", + "C-0249", + "C-0250", + "C-0251", + "C-0252", + "C-0254" + ] +} \ No newline at end of file diff --git a/releaseDev/cis-eks-t1.2.0.json b/releaseDev/cis-eks-t1.2.0.json new file mode 100644 index 000000000..bb150ea09 --- /dev/null +++ b/releaseDev/cis-eks-t1.2.0.json @@ -0,0 +1,4299 @@ +{ + "name": "cis-eks-t1.2.0", + "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", + "attributes": { + "version": "v1.2.0", + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Control Plane Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0067" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183" + ] + }, + "3": { + "name": "Container Optimized OS", + "id": "3.3", + "controlsIDs": [ + "C-0226" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191" + ] + }, + "2": { + "name": "Pod Security Policies", + "id": "4.2", + "controlsIDs": [ + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220" + ] + }, + "3": { + "name": "CNI Plugin", + "id": "4.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "4.4", + "controlsIDs": [ + "C-0207", + "C-0234" + ] + }, + "6": { + "name": "General Policies", + "id": "4.6", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0221", + "C-0223", + "C-0078" + ] + }, + "2": { + "name": "Identity and Access Management (IAM)", + "id": "5.2", + "controlsIDs": [ + "C-0225" + ] + }, + "3": { + "name": "AWS EKS Key Management Service", + "id": "5.3", + "controlsIDs": [ + "C-0066" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0232" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0233" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS 
cluster creation.", + "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", + "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. This protects against attackers in the event that they manage to gain access to etcd.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = 
cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ], + "manual_test": "Using the etcdctl commandline, read that secret out of etcd:\n\n \n```\nETCDCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] 
must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", + "references": [ + "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" + ], + "impact_statement": "", + "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." + }, + { + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", + "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", + "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. 
Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Use approved container registries.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
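A minimal, standalone Rego sketch (not shipped in this patch) of the log-type check performed by the EKS branch of the audit-log rule above. The `ClusterLogging` payload shape and the package name are assumptions taken from the rule itself; verify them against real `ClusterDescribe` data before relying on this.

```
package audit_logs_sketch

import future.keywords.every
import future.keywords.in

# The five control-plane log types the control expects to find enabled.
required_types := {"api", "audit", "authenticator", "controllerManager", "scheduler"}

# Example of a logging block that would pass: every required type appears in an
# enabled log setup (shape assumed from Cluster.Logging.ClusterLogging in the rule).
sample_log_setups := [{"Enabled": true, "Types": ["api", "audit", "authenticator", "controllerManager", "scheduler"]}]

all_enabled(log_setups) {
	every t in required_types {
		type_enabled(log_setups, t)
	}
}

type_enabled(log_setups, t) {
	setup := log_setups[_]
	setup.Enabled == true
	t in setup.Types
}

# all_enabled(sample_log_setups) is true; removing "audit" from Types leaves it
# undefined, which the rule above reports as "audit logs is disabled".
```

Like the shipped rules, this sketch uses the legacy (pre-OPA-1.0) Rego style without the `if` keyword, so newer OPA releases may require v0 compatibility mode to evaluate it.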
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + } + ], + "references": [ + "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", + "default_value": "" + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + 
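The allow-list matching inside the `container-image-repository` rule above is hard to read in its escaped form; the sketch below restates the same host-prefix logic on its own. The package name, `allow_list` entries, and example images are hypothetical, illustrative values only.

```
package image_allowlist_sketch

import future.keywords.if
import future.keywords.in

# Hypothetical allow list; a trailing "/" means "this exact registry prefix".
allow_list := ["quay.io/kubescape/", "docker.io/"]

# An image without "/" is assumed to come from Docker Hub, mirroring docker_host_wrapper.
with_default_host(image) := out if {
	not contains(image, "/")
	out := sprintf("docker.io/%s", [image])
} else := image

# Mirrors regexify: anchor the registry as a prefix of the full image reference.
registry_pattern(registry) := out if {
	endswith(registry, "/")
	out := sprintf("^%s.*$", [registry])
} else := sprintf("^%s/.*$", [registry])

image_allowed(image) if {
	some registry in allow_list
	regex.match(registry_pattern(registry), with_default_host(image))
}

# image_allowed("quay.io/kubescape/kubescape:v3") and image_allowed("nginx") are true;
# image_allowed("ghcr.io/someone/app:1.0") is undefined, so the deny rule would fire.
```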
"long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
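The ownership comparison used by the file-ownership rules in this section is easy to miss inside the escaped strings, so here is a standalone restatement with a hypothetical sample of the host-sensor `ownership` object (field names are taken from the rule itself, not from host-sensor documentation).

```
package file_ownership_sketch

# Sample of the ownership object the host sensor attaches to a file entry.
sample_ownership := {"username": "root", "groupname": "root"}

# Ownership is acceptable when the file is owned by the expected user and group,
# or when the sensor could not resolve ownership at all (an "error" field is set),
# so missing data does not produce a false positive.
allowed_ownership(ownership, user, group) {
	ownership.error
}

allowed_ownership(ownership, user, group) {
	ownership.username == user
	ownership.groupname == group
}

# allowed_ownership(sample_ownership, "root", "root") is true;
# {"username": "ec2-user", "groupname": "ec2-user"} would fail and trigger the chown fix command.
```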
Verify that the ownership is set to `root:root`", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, 
\"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot 
obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
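The same pattern applies to the client-CA check defined just above (CIS 3.2.3): the control passes only if a CA bundle is configured either on the command line or in the config file. A hypothetical, standalone restatement:

```
package kubelet_client_ca_sketch

# True when no client CA is configured anywhere, which is what the deny rules report.
client_ca_missing(cmd_line, config) {
	not contains(cmd_line, "--client-ca-file")
	not config.authentication.x509.clientCAFile
}

# client_ca_missing("kubelet --config /etc/kubernetes/kubelet-config.json",
#                   {"authentication": {"x509": {"clientCAFile": "/etc/kubernetes/pki/ca.crt"}}})
# is undefined, i.e. the control passes.
```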
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "read-only-port-enabled-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting 
idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/pull/18552" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 
2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", 
\"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n 
First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"makeIPTablesUtilChains.:true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-ip-tables", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property 
makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. 
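As a quick sanity check of the `kubelet-ip-tables` rule above, a minimal OPA unit test might look like the sketch below. It assumes the test file sits in the same `armo_builtins` package as the rule and is run with `opa test`; the node name and kubelet command line are invented.

```rego
package armo_builtins

# Hypothetical input: a kubelet started with util-chains management explicitly
# disabled. The C-0178 deny rule above should report it.
test_make_iptables_util_chains_disabled_is_flagged {
	count(deny) > 0 with input as [{
		"apiVersion": "hostdata.kubescape.cloud/v1beta0",
		"kind": "KubeletInfo",
		"metadata": {"name": "node-1"},
		"data": {"cmdLine": "kubelet --make-iptables-util-chains=false"},
	}]
}
```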
The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/issues/22063", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-hostname-override", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. 
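The `json.filter` call used by the rule above (and by most kubelet rules in this file) trims the host-sensor object down to the listed paths before it is attached to the alert. A small self-contained sketch of that behaviour, with an invented `KubeletInfo` object:

```rego
package example

# Invented KubeletInfo object, shaped like the host-sensor data these rules consume.
kubelet_info := {
	"apiVersion": "hostdata.kubescape.cloud/v1beta0",
	"kind": "KubeletInfo",
	"metadata": {"name": "node-2"},
	"data": {
		"cmdLine": "kubelet --hostname-override=ip-10-0-0-1",
		"configFile": {"content": "base64-blob-omitted"},
	},
}

# Keeps only apiVersion, kind, metadata and data.cmdLine, so the alert carries the
# command line but not the potentially large embedded config file.
trimmed := json.filter(kubelet_info, ["apiVersion", "data/cmdLine", "kind", "metadata"])
```

Evaluating `data.example.trimmed` with `opa eval` should return the object without `data.configFile`.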
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. 
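The `kubelet-event-qps` rule defined below reads this limit from the kubelet config file rather than from the command line. The config-file branch shared by these kubelet rules can be sketched in isolation as follows; the inline YAML is an invented example.

```rego
package example

# Invented kubelet config, base64-encoded the same way the host sensor ships it.
sample_config_content := base64.encode("eventRecordQPS: 0\nreadOnlyPort: 0\n")

# Mirrors the config-file branch of the kubelet rules: decode, parse as YAML,
# then inspect a single field. True here because eventRecordQPS is 0 (no limit).
event_rate_limit_disabled {
	yamlConfig := yaml.unmarshal(base64.decode(sample_config_content))
	yamlConfig.eventRecordQPS == 0
}
```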
The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-event-qps", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0181", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/41912", + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", + "https://kubernetes.io/docs/imported/release/notes/", + "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "validate-kubelet-tls-configuration-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletConfiguration", + "KubeletCommandLine" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", + "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 
0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t#get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its 
client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
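The CLI branch of the rule attached to this control (further below) detects the feature gate by splitting the kubelet command line and matching the gate assignment. A self-contained sketch of that approach, using an invented command line:

```rego
package example

# Invented kubelet invocation with the server-certificate rotation gate enabled.
cmdline := "kubelet --feature-gates=RotateKubeletServerCertificate=true --config /etc/kubernetes/kubelet/kubelet-config.json"

# True when the feature-gates flag is present and one of the space-separated
# arguments carries RotateKubeletServerCertificate=true.
rotate_server_certificate_enabled {
	contains(cmdline, "--feature-gates=")
	args := regex.split(" +", cmdline)
	some i
	regex.match("RotateKubeletServerCertificate=true", args[i])
}
```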
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/45059", + "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" + } + ] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
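The `cluster-admin-role` rule attached to this control (below) does not read Role and binding objects directly; it consumes subject vectors produced by the `subject-role-rolebinding` aggregator. A hypothetical OPA unit test with an invented `system:masters` vector that approximates that aggregated shape, assuming the test is loaded in the same `armo_builtins` package as the rule:

```rego
package armo_builtins

# Invented subject vector: the system:masters group bound to cluster-admin
# through a ClusterRoleBinding, roughly the shape the aggregator produces.
test_cluster_admin_binding_is_reported {
	count(deny) > 0 with input as [{
		"kind": "Group",
		"name": "system:masters",
		"apiGroup": "rbac.authorization.k8s.io",
		"relatedObjects": [
			{
				"kind": "ClusterRole",
				"metadata": {"name": "cluster-admin"},
				"rules": [{"apiGroups": ["*"], "resources": ["*"], "verbs": ["*"]}],
			},
			{
				"kind": "ClusterRoleBinding",
				"metadata": {"name": "cluster-admin"},
				"roleRef": {"name": "cluster-admin"},
				"subjects": [{
					"kind": "Group",
					"name": "system:masters",
					"apiGroup": "rbac.authorization.k8s.io",
				}],
			},
		],
	}]
}
```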
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "cluster-admin-role", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := 
subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
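Beyond the aggregated rule bundled with this control, the wildcard pattern it warns about can also be spotted directly on a single Role or ClusterRole manifest. A small self-contained sketch (not the rule shipped here), meant to be evaluated with `opa eval` against a manifest converted to JSON:

```rego
package example

# RBAC rule fields in which a "*" grants blanket access.
fields := ["apiGroups", "resources", "verbs"]

# Reports every rule of the input Role/ClusterRole that uses a wildcard.
wildcard_rules[msg] {
	rule := input.rules[i]
	field := fields[_]
	rule[field][_] == "*"
	msg := sprintf("rule %d uses a wildcard in %s", [i, field])
}
```

Feeding it the JSON output of `kubectl get clusterrole <name> -o json` should list the offending rules, mirroring the role review this control calls for.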
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": 
"CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-create-pod", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", 
[rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "automount-default-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + 
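As a concrete illustration of the remediation described for C-0189 (all names, namespaces and images below are hypothetical and not part of this patch), token automount can be disabled on the `default` service account, API access can be moved to a dedicated account, and pods that do not need the API can opt out at the pod level:

```
# Hypothetical example: stop the default service account from automounting its token.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: my-app                 # hypothetical namespace
automountServiceAccountToken: false
---
# Workloads that do need the API get an explicit, auditable account.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: billing-api                 # hypothetical dedicated account
  namespace: my-app
---
# Pods that do not need the API can also opt out in their own spec.
apiVersion: v1
kind: Pod
metadata:
  name: web                         # hypothetical pod
  namespace: my-app
spec:
  automountServiceAccountToken: false
  containers:
    - name: web
      image: nginx:1.25             # hypothetical image
```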
"match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "namespace-without-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any 
service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := 
is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": 
sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. 
The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", + "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", + "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-bind-escalate", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can or bind escalate roles/clusterroles", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 
0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "controlID": "C-0205", + "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", + "references": [ + "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", + "https://aws.github.io/aws-eks-best-practices/network/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None.", + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + } + ] + }, + { + "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", + "https://octetz.com/posts/k8s-network-policy-apis", + "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
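A common way to satisfy both the CNI control above and the per-namespace policy requirement of C-0206 is to start every namespace with a default-deny policy and then allow specific flows on top of it; a minimal sketch with hypothetical names (not part of this patch):

```
# Hypothetical example: default-deny for one namespace.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all      # hypothetical name
  namespace: my-app           # hypothetical namespace
spec:
  podSelector: {}             # empty selector matches every pod in the namespace
  policyTypes:
    - Ingress
    - Egress                  # no rules listed, so no traffic is allowed
```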
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
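The remediation for C-0207 (read secrets from mounted files rather than environment variables) could be applied as in the hypothetical pod below; the secret, volume and image names are made up for illustration and are not part of this patch:

```
# Hypothetical example: mount a Secret as files instead of env vars.
apiVersion: v1
kind: Pod
metadata:
  name: app                        # hypothetical pod
spec:
  containers:
    - name: app
      image: busybox:1.36          # hypothetical image
      command: ["sh", "-c", "sleep 3600"]
      volumeMounts:
        - name: db-creds
          mountPath: /etc/secrets  # application reads e.g. /etc/secrets/password
          readOnly: true
  volumes:
    - name: db-creds
      secret:
        secretName: db-credentials # hypothetical existing Secret
```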
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-secrets-in-env-var", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", 
[wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "list-all-namespaces", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. 
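For C-0209, the administrative boundary is simply a dedicated namespace per team or application, to which RBAC, quotas and network policies can then be attached; a hypothetical example (not part of this patch):

```
# Hypothetical example: a per-team namespace.
apiVersion: v1
kind: Namespace
metadata:
  name: team-payments          # hypothetical name
  labels:
    environment: production    # hypothetical label
```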
It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the 
following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := 
is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + }, + { + "name": "non-root-containers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. 
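The security-context rules grouped under C-0211 (privilege escalation, writable root filesystem, root users and the NET_RAW capability) can all be satisfied by a pod along the lines of the hypothetical manifest below; the image and user IDs are illustrative only and not part of this patch:

```
# Hypothetical example: pod- and container-level security contexts
# that pass the checks in this control.
apiVersion: v1
kind: Pod
metadata:
  name: hardened-app                        # hypothetical pod
spec:
  securityContext:                          # pod level, inherited by containers
    runAsNonRoot: true
    runAsUser: 10001
    runAsGroup: 10001
  containers:
    - name: app
      image: registry.example.com/app:1.0   # hypothetical image
      securityContext:                      # container level, overrides pod level
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
        privileged: false
        capabilities:
          drop: ["NET_RAW"]
```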
Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", 
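To illustrate the evaluation above: a workload is not flagged when runAsNonRoot is true, runAsUser is non-zero, and allowPrivilegeEscalation is false. A minimal Pod sketch (the name and image are placeholders; the same fields can also be set per container, where they override the pod-level values):

```
apiVersion: v1
kind: Pod
metadata:
  name: non-root-example            # placeholder
spec:
  securityContext:
    runAsNonRoot: true              # pod-level default for all containers
    runAsUser: 1000                 # any non-zero UID
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder
      securityContext:
        allowPrivilegeEscalation: false
```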
\"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure 
you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + 
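For illustration, seLinuxOptions can be declared at the pod level (or per container); the MCS label below is a placeholder and has to match the node's SELinux policy:

```
apiVersion: v1
kind: Pod
metadata:
  name: selinux-example             # placeholder
spec:
  securityContext:
    seLinuxOptions:
      level: "s0:c123,c456"         # placeholder MCS level
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder
```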
"apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, 
path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": 
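A minimal sketch of the seccomp remediation above, assuming the runtime's default profile is acceptable; the field can equally be set in a container-level securityContext:

```
apiVersion: v1
kind: Pod
metadata:
  name: seccomp-example             # placeholder
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault          # or Localhost together with localhostProfile
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder
```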
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" + }, + { + "name": "set-fsgroup-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-systctls-params", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.systctls is not set.", + "remediation": "Set securityContext.systctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + 
"v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "CIS-4.6.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-rbac-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
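For illustration, the fix suggested by the rule above is simply an explicit, non-default namespace; "my-team" is a placeholder:

```
apiVersion: v1
kind: Pod
metadata:
  name: app
  namespace: my-team                # placeholder; anything other than "default"
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder
```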
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-core1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap", + "Endpoints", + "LimitRange", + "PersistentVolumeClaim", + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-core2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController", + "ResourceQuota", + "ServiceAccount", + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": 
\"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-other1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ControllerRevision" + ] + }, + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + }, + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + }, + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-other2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress", + "NetworkPolicy" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + }, + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] 
{\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-event-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "events.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "psp-deny-privileged-container", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the 
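A minimal restrictive PodSecurityPolicy sketch that would satisfy this check (PSP is served from policy/v1beta1, as matched above, and was removed in Kubernetes 1.25; the name and volume list are placeholders):

```
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-example          # placeholder
spec:
  privileged: false                 # the field this control checks
  seLinux:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:                          # placeholder allow-list
    - configMap
    - secret
    - emptyDir
    - persistentVolumeClaim
```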
admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostpid", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create 
a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostipc", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0216", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not 
defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-hostnetwork", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return all the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0217", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether allowPrivilegeEscalation is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowprivilegeescalation", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return all the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0218", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-root-container", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return all the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" + } + ] + }, + { + "controlID": "C-0219", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outside the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created, 'allowedCapabilities' is set by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-deny-allowed-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return all the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0220", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases, applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, the use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-required-drop-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return all the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" + } + ] + }, + { + "controlID": "C-0221", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "To utilize AWS ECR for image scanning, please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.\n\n 2. Open the Amazon ECR console at.\n3. From the navigation bar, choose the Region to create your repository in.\n4. 
In the navigation pane, choose Repositories.\n5. On the Repositories page, choose the repository that contains the image to scan.\n6. On the Images page, select the image to scan and then choose Scan.", + "manual_test": "Please follow AWS ECR or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "If you are utilizing AWS ECR, the following are common image scan failures. You can view errors like these in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageError: You may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returned: You may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", + "default_value": "Images are not scanned by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure-image-scanning-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "DescribeRepositories" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning is enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" + } + ] + }, + { + "controlID": "C-0222", + "name": "CIS-5.1.2 Minimize user access to Amazon ECR", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with 
vulnerable containers.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-aws-policies-are-present", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "fails if aws policies are not found", + "remediation": "Implement policies to minimize user access to Amazon ECR", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0223", + "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring 
pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", + "attributes": { + "armoBuiltin": true, + "useFromKubescapeVersion": "v2.2.5" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\t#node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none 
read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" + } + ] + }, + { + "controlID": "C-0225", + "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Auditability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally, ensure that the automountServiceAccountToken: false setting is in place for each default service account.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", + "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-default-service-accounts-has-only-default-roles", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "automount-default-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[service_account]\n\t\t}\n\t}\n} \n" + } + ] + }, + { + "controlID": "C-0226", + "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", + "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", + "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like locked-down firewall is configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", + "remediation": "", + "manual_test": "If a container-optimized OS is required examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", + "references": [ + "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", + "https://aws.amazon.com/bottlerocket/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. ssh) may not be possible, with access and debugging being intended via a management tool.", + "default_value": "A container-optimized OS is not the default.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "alert-container-optimized-os-not-in-use", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. 
\n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" + } + ] + }, + { + "controlID": "C-0227", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. 
If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", + "default_value": "By default, Endpoint Public Access is disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-endpointprivateaccess-is-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "controlID": "C-0228", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. 
The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "Check for private endpoint access to the Kubernetes API server", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", + "default_value": "By default, the Public Endpoint is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" + } + ] + }, + { + "controlID": "C-0229", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "controlID": "C-0230", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", + "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", + "remediation": "", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Network Policy requires the Network Policy add-on. 
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-network-policy-is-enabled-eks", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0231", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [ + "EKS" + ], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := \"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := 
\"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := \tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" + } + ] + }, + { + "controlID": "C-0232", + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", + "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not they will not be able to access the namespace or deploy.", + "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "review-roles-with-aws-iam-authenticator", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0233", + "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", + "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", + "long_description": "", + "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. 
On the Configure pods selection page, enter the following information and choose Next.\n\n * list text hereFor Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "", + "default_value": "By default, AWS Fargate is not utilized.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "alert-fargate-not-in-use", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if it's name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" + } + ] + }, + { + "controlID": "C-0234", + "name": "CIS-4.4.2 Consider external secret storage", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "manual_test": "Review your secrets management implementation.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "ensure-external-secrets-storage-is-in-use", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" + } + ] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the 
`--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [ + { + "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. 
configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [ + { + "name": "rule-hostile-multitenant-workloads", + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": [], + "configInputs": [], + "controlConfigInputs": [], + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", + "remediation": "Use physically isolated clusters", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" + } + ] + }, + { + "controlID": "C-0246", + "name": "CIS-4.1.7 Avoid use of system:masters group", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", + "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. 
An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", + "references": [ + "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", + "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rule-manual", + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + } + ] + } + ], + "ControlsIDs": [ + "C-0066", + "C-0067", + "C-0078", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0205", + "C-0206", + "C-0207", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220", + "C-0221", + "C-0222", + "C-0223", + "C-0225", + "C-0226", + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231", + "C-0232", + "C-0233", + "C-0234", + "C-0235", + "C-0238", + "C-0242", + "C-0246" + ] +} \ No newline at end of file diff --git a/releaseDev/cis-v1.23-t1.0.1.json b/releaseDev/cis-v1.23-t1.0.1.json new file mode 100644 index 000000000..b7efec379 --- /dev/null +++ b/releaseDev/cis-v1.23-t1.0.1.json @@ -0,0 +1,7883 @@ +{ + "name": "cis-v1.23-t1.0.1", + "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", + "attributes": { + "version": "v1.0.1", + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "1": { + "id": "1", + "name": "Control Plane Components", + "subSections": { + "1": { + "id": "1.1", + "name": "Control Plane Node Configuration Files", + "controlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + 
"C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112" + ] + }, + "2": { + "id": "1.2", + "name": "API Server", + "controlsIDs": [ + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + "C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143" + ] + }, + "3": { + "id": "1.3", + "name": "Controller Manager", + "controlsIDs": [ + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150" + ] + }, + "4": { + "id": "1.4", + "name": "Scheduler", + "controlsIDs": [ + "C-0151", + "C-0152" + ] + } + } + }, + "2": { + "name": "etcd", + "id": "2", + "controlsIDs": [ + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159" + ] + }, + "3": { + "name": "Control Plane Configuration", + "id": "3", + "subSections": { + "2": { + "name": "Logging", + "id": "3.2", + "controlsIDs": [ + "C-0160", + "C-0161" + ] + } + } + }, + "4": { + "name": "Worker Nodes", + "id": "4", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "4.1", + "controlsIDs": [ + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171" + ] + }, + "2": { + "name": "Kubelet", + "id": "4.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184" + ] + } + } + }, + "5": { + "name": "Policies", + "id": "5", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "5.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "5.2", + "controlsIDs": [ + "C-0192", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204" + ] + }, + "3": { + "name": "Network Policies and CNI", + "id": "5.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "5.4", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "7": { + "name": "General Policies", + "id": "5.7", + "controlsIDs": [ + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "controlID": "C-0092", + "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0093", + "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0094", + "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0095", + "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0096", + "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. 
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0097", + "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0098", + "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various 
parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0099", + "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0100", + "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0101", + "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0102", + "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory has permissions of `755`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0103", + "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"etcd\"\n\tallowed_group := \"etcd\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0104", + "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, admin.conf has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0105", + "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0106", + "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0107", + "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0108", + "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0109", + "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0110", + "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 
2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0111", + "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0112", + "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0113", + "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the API server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result 
{\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0114", + "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", + "description": "Do not use token based authentication.", + "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", + "default_value": "By default, `--token-auth-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not use token based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"static token file authentication is enabled (--token-auth-file is set)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0115", + "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", + "default_value": "By default, `--token-auth-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. 
It should be removed from the --enable-admission-plugins list\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_result(fixed_values, i)\n}\n\nget_result(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0116", + "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "description": "Enable certificate based kubelet authentication.", + "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, certificate-based kubelet authentication is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable certificate based kubelet authentication.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 
0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0117", + "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "description": "Verify kubelet's certificate before establishing connection.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verify kubelet's certificate before establishing connection.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0118", + "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not always authorize all requests.", + "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Only authorized requests will be served.", + "default_value": "By default, `AlwaysAllow` is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not always authorize all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": 
[obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0119", + "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, `Node` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed 
paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0120", + "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", + "description": "Turn on Role Based Access Control.", + "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. It is recommended to use the RBAC authorization mode.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", + "default_value": "By default, `RBAC` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Turn on Role Based Access Control.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", + "ruleQuery": 
"", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0121", + "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", + "description": "Limit the rate at which the API server accepts requests.", + "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "You need to carefully tune in limits as per your environment.", + "default_value": "By default, `EventRateLimit` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the rate at which the API server accepts requests.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0122", + "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", + "description": "Do not allow all requests.", + "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", + "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not allow all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0123", + "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", + "description": "Always pull images.", + "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. 
When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on network, registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", + "default_value": "By default, `AlwaysPullImages` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Always pull images.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on network, registry, and decrease speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0124", + "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", + "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies.", + "default_value": "By default, `SecurityContextDeny` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies.\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"The SecurityContextDeny admission controller is not enabled. 
This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0125", + "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", + "description": "Automate service accounts management.", + "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "None.", + "default_value": "By default, `ServiceAccount` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Automate service accounts management.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin ServiceAccount is disabled. 
Service account management should be automated by the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0126", + "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "None", + "default_value": "By default, `NamespaceLifecycle` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Reject creating objects in a namespace that is undergoing termination.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin NamespaceLifecycle is disabled. 
Objects could be created in non-existent namespaces or in namespaces undergoing termination\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0127", + "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `NodeRestriction` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", 
[count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0128", + "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", + "description": "Do not disable the secure port.", + "long_description": "The secure port is used to serve https with authentication and authorization. If you disable it, no https traffic is served and all traffic is served unencrypted.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "You need to set the API Server up with the right TLS certificates.", + "default_value": "By default, port 6443 is used as the secure port.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not disable the secure port.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": 
[obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0129", + "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. 
This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0130", + "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected the system by individual users, administrators or other components of the system. Even though currently Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0131", + "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "description": "Retain the logs for at least 30 days or as appropriate.", + "long_description": "Retaining logs for at least 30 days ensures that 
you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain the logs for at least 30 days or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", + 
"resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0132", + "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "description": "Retain 10 or an appropriate number of old log files.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain 10 or an appropriate number of old log files.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate 
value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0133", + "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0134", + "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", + "description": "Set global request timeout for API server requests as appropriate.", + "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--request-timeout` is set to 60 seconds.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Set global request timeout for API server requests as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0135", + "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", + "description": "Validate service account before validating token.", + "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. 
This is an example of a time-of-check to time-of-use security issue.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists, it is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `--service-account-lookup` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Validate service account before validating token.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the --service-account-lookup argument is set to false\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == 
\"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0136", + "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-key-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the --service-account-key-file argument is not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0137", + "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": 
"C-0138", + "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0139", + "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0140", + "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key 
value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-cafile` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0141", + "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "description": "Encrypt etcd key-value store.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `--encryption-provider-config` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Encrypt etcd key-value store.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0142", + "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. 
Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no encryption provider is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" + } + ] + }, + { + "controlID": "C-0143", + "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic 
Ciphers", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make 
connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 
0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0144", + "name": "CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "description": "Activate garbage collector on pod termination, as appropriate.", + "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Activate garbage collector on pod termination, as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": 
[],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0145", + "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == 
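As a concrete illustration of the two Controller Manager settings covered here (pod garbage-collection threshold and profiling), the flags sit on the `kube-controller-manager` command in the static pod manifest; the threshold value below is only an example and should be tuned to the cluster.

```
# Excerpt of /etc/kubernetes/manifests/kube-controller-manager.yaml (illustrative)
spec:
  containers:
    - name: kube-controller-manager
      command:
        - kube-controller-manager
        # ...existing flags...
        - --terminated-pod-gc-threshold=10   # example value; choose one appropriate for your cluster
        - --profiling=false
```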
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0146", + "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "description": "Use individual service account credentials for each controller.", + "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.", + "default_value": "By default, `--use-service-account-credentials` is set to false.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Use individual service account credentials for each controller.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": 
{\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0147", + "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-private-key-file` it not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` it not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == 
\"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0148", + "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "You need to setup and maintain root certificate authority file.", + "default_value": "By default, `--root-ca-file` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", + "ruleQuery": "", 
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0149", + "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable kubelet server certificate rotation on controller-manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
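For the two remaining certificate-related Controller Manager settings (trusted root CA injection and kubelet serving-certificate rotation), an illustrative excerpt follows. The CA path is the common kubeadm default and is an assumption, and any feature gates already present should be merged into the same comma-separated value.

```
# Excerpt of /etc/kubernetes/manifests/kube-controller-manager.yaml (illustrative)
spec:
  containers:
    - name: kube-controller-manager
      command:
        - kube-controller-manager
        # ...existing flags...
        - --root-ca-file=/etc/kubernetes/pki/ca.crt              # typical kubeadm path (assumption)
        - --feature-gates=RotateKubeletServerCertificate=true    # merge with existing gates if any
```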
\"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0150", + "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = 
sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + } + ] + }, + { + "controlID": "C-0151", + "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := 
sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + } + ] + }, + { + "controlID": "C-0152", + "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + } + ] + }, + { + "controlID": "C-0153", + "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", + "description": "Configure TLS encryption for the etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Client connections only over TLS would be served.", + "default_value": "By default, TLS encryption is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "etcd-tls-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Configure TLS encryption for the etcd service.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = 
invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0154", + "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", + "description": "Enable client authentication on etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", + "default_value": "By default, the etcd service can be queried by unauthenticated clients.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "etcd-client-auth-cert", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Enable client authentication on etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = 
input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0155", + "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", + "description": "Do not use self-signed certificates for TLS.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. 
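For the client-facing etcd settings above (serving certificate, key, and mandatory client certificate authentication), an illustrative excerpt of the etcd static pod manifest follows; the certificate paths are kubeadm-style defaults and are assumptions.

```
# Excerpt of /etc/kubernetes/manifests/etcd.yaml (illustrative; kubeadm-style paths assumed)
spec:
  containers:
    - name: etcd
      command:
        - etcd
        # ...existing flags...
        - --cert-file=/etc/kubernetes/pki/etcd/server.crt
        - --key-file=/etc/kubernetes/pki/etcd/server.key
        - --client-cert-auth=true
```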
You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", + "default_value": "By default, `--auto-tls` is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "etcd-auto-tls-disabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use self-signed certificates for TLS.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0156", + "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "etcd-peer-tls-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0157", + "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", + "description": "etcd should be configured for peer authentication.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", + "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, `--peer-client-cert-auth` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "etcd-peer-client-auth-cert", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured for peer authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0158", + "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. 
Hence, do not use self-signed certificates for authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "etcd-peer-auto-tls-disabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. 
Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + } + ] + }, + { + "controlID": "C-0159", + "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", + "default_value": "By default, no etcd certificate is created and used.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "etcd-unique-ca", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := regex.split(\"=\", command)\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0160", + "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", + "remediation": "Create an audit policy file for your cluster.", + "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating overly large volumes of log information, as this could impact the availability of the cluster nodes.", + "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-native-cis", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + } + ] + }, + { + "controlID": "C-0161", + "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", + "default_value": "By default Kubernetes clusters do not log audit information.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "audit-policy-content", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n#rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit levels\n\tvalid_rules 
:= [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" + } + ] + }, + { + "controlID": "C-0162", + "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kubelet` service file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0163", + "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0164", + "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, proxy file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0165", + "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `proxy` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0166", + "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0167", + "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U %G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0168", + "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0169", + "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0170", + "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + } + ] + }, + { + "controlID": "C-0171", + "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + } + ] + }, + { + "controlID": "C-0172", + "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot 
yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0173", + "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). 
Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0174", + "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := 
base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0175", + "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "read-only-port-enabled-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0176", + "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout 
argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := 
yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0177", + "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "By default, `--protect-kernel-defaults` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0178", + "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-ip-tables", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0179", + "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. 
Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", + "default_value": "By default, `--hostname-override` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-hostname-override", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" + } + ] + }, + { + "controlID": "C-0180", + "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "By default, `--event-qps` argument is set to `5`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-event-qps", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, 
\"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0181", + "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the Kubelets.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "validate-kubelet-tls-configuration-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletConfiguration", + "KubeletCommandLine" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", + "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := 
input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t#get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0182", + "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client 
certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet client certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-certificates", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --rotate-certificates argument is not set to false.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "controlID": "C-0183", + "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet server certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" + } + ] + }, + { + "controlID": "C-0184", + "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and 
weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "kubelet-strong-cryptographics-ciphers", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", + "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, 
\"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. 
Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "cluster-admin-role", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | 
apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": 
"subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-create-pod", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup 
= rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "automount-default-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": 
[ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "namespace-without-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := 
get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": 
\"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. 
The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-can-bind-escalate", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can or bind escalate roles/clusterroles", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# 
fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in 
api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "controlID": "C-0192", + "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", + "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
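A minimal sketch of the namespace labelling that the Pod Security Admission check above looks for; the namespace name and the `baseline` level are placeholders, so pick the level your workloads actually tolerate.

```
# Minimal sketch: enforce a Pod Security Standard on one namespace.
kubectl label --overwrite namespace my-namespace \
  pod-security.kubernetes.io/enforce=baseline

# Review which namespaces already carry an enforce label.
kubectl get namespaces -L pod-security.kubernetes.io/enforce
```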
Care is required when implementing admission control policies to ensure that this does not occur.", + "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "controlID": "C-0193", + "name": "CIS-5.2.2 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
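Alongside the admission policy, it can help to see which pods already run privileged. A minimal sketch assuming jq is installed; it checks regular containers only, so extend it to initContainers and ephemeralContainers if needed.

```
# Minimal sketch: list pods with at least one privileged container.
kubectl get pods -A -o json \
  | jq -r '.items[]
           | select(any(.spec.containers[]; .securityContext.privileged == true))
           | "\(.metadata.namespace)/\(.metadata.name)"'
```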
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of privileged containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0194", + "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n    key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n    some admissionwebhook in inp\n    admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n    admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n    not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n    admissionwebhook := input[_]\n    admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n    key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n    some admissionwebhook in inp\n    admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n    admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0195", + "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be 
permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0196", + "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namesapces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + 
"ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n 
key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0197", + "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of conatiners with `.spec.allowPrivilegeEscalation`set to `true`.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0198", + "name": "CIS-5.2.7 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0199", + "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", + "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0200", + "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + 
"long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, there are no restrictions on adding capabilities to containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0201", + "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilites in applications runnning on your cluster. Where a namespace contains applicaions which do not require any Linux capabities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-restricted-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0202", + "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", + "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0203", + "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", + "description": "Do not generally admit containers which make use of `hostPath` volumes.", + "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the 
underlying cluster node. The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a spefific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0204", + "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", + "description": "Do not generally permit containers which require the use of HostPorts.", + "long_description": "Host ports connect containers directly to the host's network. 
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the use of HostPorts.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + } + ] + }, + { + "controlID": "C-0205", + "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. 
As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + } + ] + }, + { + "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. 
However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-secrets-in-env-var", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", 
[wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "CIS-5.4.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "external-secret-storage", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
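As a rough, hypothetical sketch of what the rule below looks for (the package name and KMS endpoint are invented), the Rego here mirrors its provider allow-list: secrets are treated as externally managed when the API server's encryption provider configuration names one of the recognised KMS plugins.

```
package example_external_kms

import future.keywords.in

# Hypothetical EncryptionConfiguration content, already decoded from the
# encryption provider config file that the rule inspects.
sample_encryption_config := {"resources": [{
	"resources": ["secrets"],
	"providers": [{"kms": {"name": "aws-encryption-provider", "endpoint": "unix:///var/run/kmsplugin/socket.sock"}}]
}]}

# Same allow-list the rule uses: an external KMS is considered in place when one of
# these provider names appears for a resource.
has_recommended_provider(resource) {
	recommended_providers := {"akeyless", "azurekmsprovider", "aws-encryption-provider"}
	some provider in resource.providers
	recommended_providers[provider.kms.name]
}

# True for the sample above, so this control would treat secrets as externally managed.
sample_config_compliant {
	some resource in sample_encryption_config.resources
	has_recommended_provider(resource)
}
```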
Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" + } + ] + }, + { + "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "default_value": "By default, Kubernetes starts with two initial namespaces: 1. 
`default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-node-lease` - Namespace used for node heartbeats\n4. `kube-public` - Namespace used for public information in a cluster", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "list-all-namespaces", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "manual_test": "Review the pod definitions in your cluster.
It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", + "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "set-seccomp-profile-RuntimeDefault", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile as RuntimeDefault", + "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as 
RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" + } + ] + }, + { + "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
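As a consolidated, hypothetical sketch (the package name, container name and image are invented, and the shipped rules below each check a single aspect separately), the Rego here shows a container securityContext that would satisfy the privileged, read-only root filesystem, non-root and NET_RAW checks applied by this control.

```
package example_security_context

import future.keywords.in

# Hypothetical container spec whose securityContext passes the checks in this control.
sample_container := {
	"name": "app",
	"image": "registry.example.com/app:1.0",
	"securityContext": {
		"privileged": false,
		"readOnlyRootFilesystem": true,
		"runAsNonRoot": true,
		"allowPrivilegeEscalation": false,
		"capabilities": {"drop": ["NET_RAW"]}
	}
}

# A container passes this simplified check when none of the risky settings are present.
security_context_hardened(container) {
	not container.securityContext.privileged == true
	container.securityContext.readOnlyRootFilesystem == true
	container.securityContext.runAsNonRoot == true
	container.securityContext.allowPrivilegeEscalation == false
	"NET_RAW" in container.securityContext.capabilities.drop
}

# Evaluates to true for sample_container.
sample_container_passes {
	security_context_hardened(sample_container)
}
```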
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN 
capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + }, + { + "name": "non-root-containers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. 
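These fields can be set at either the pod or the container level, and the rule gives the container-level value precedence. As a small hypothetical sketch of that resolution order for runAsNonRoot (the package name and sample objects are invented, and the shipped rule handles more cases than shown here), consider:

```
package example_non_root

# Container-level securityContext takes precedence over the pod-level one,
# mirroring the resolution order used by the non-root-containers rule.
effective_run_as_non_root(container, pod_spec) = value {
	value := container.securityContext.runAsNonRoot
} else = value {
	value := pod_spec.securityContext.runAsNonRoot
} else = false

# Hypothetical specs: the pod default allows root, the container overrides it.
sample_pod_spec := {"securityContext": {"runAsNonRoot": false}}
sample_container := {"name": "app", "securityContext": {"runAsNonRoot": true}}

# True here, because the container-level runAsNonRoot wins.
sample_container_non_root {
	effective_run_as_non_root(sample_container, sample_pod_spec) == true
}
```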
Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", 
\"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure 
you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + 
"apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, 
path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" + }, + { + "name": "set-fsgroup-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-systctls-params", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.systctls is not set.", + "remediation": "Set securityContext.systctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + 
"v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "CIS-5.7.4 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-rbac-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path 
:= get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-core1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap", + "Endpoints", + "LimitRange", + "PersistentVolumeClaim", + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-core2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController", + "ResourceQuota", + "ServiceAccount", + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] 
{\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-other1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ControllerRevision" + ] + }, + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + }, + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + }, + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-other2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress", + "NetworkPolicy" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + }, + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", 
\"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "resources-event-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "events.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112", + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + 
"C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143", + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150", + "C-0151", + "C-0152", + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159", + "C-0160", + "C-0161", + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0192", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] +} \ No newline at end of file diff --git a/releaseDev/controls.json b/releaseDev/controls.json new file mode 100644 index 000000000..e5cbc7b9c --- /dev/null +++ b/releaseDev/controls.json @@ -0,0 +1,6728 @@ +[ + { + "name": "Allow privilege escalation", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "rulesNames": [ + "rule-allow-privilege-escalation" + ], + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "enforce-kubelet-client-tls-authentication-updated" + ], + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0100", + "name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0140", + "name": "Ensure that the API Server --etcd-cafile argument is set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
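As an illustration of the flag check this control describes, the hedged sketch below assumes the kube-apiserver command line is exposed as a hypothetical input.apiServerCmdLine string; the bundled ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate rule is not shown in this hunk and may read its input differently.

    package example

    # Deny when the API server is not started with --etcd-cafile.
    deny[msg] {
        not contains(input.apiServerCmdLine, "--etcd-cafile=")
        msg := "kube-apiserver is not started with --etcd-cafile"
    }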
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-cafile` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "rulesNames": [ + "rule-can-list-get-secrets", + "rule-can-list-get-secrets-v1" + ], + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0164", + "name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
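To make the "600 or more restrictive" requirement concrete, the sketch below encodes it in Rego under one stated assumption: the file mode arrives as an integer whose decimal digits mirror the octal mode (for example 640 for rw-r-----), via a hypothetical input.proxyKubeconfigMode field. The shipped rule is not shown here.

    package example

    deny[msg] {
        mode := input.proxyKubeconfigMode
        not mode_600_or_more_restrictive(mode)
        msg := sprintf("proxy kubeconfig permissions %v are weaker than 600", [mode])
    }

    # 600 or more restrictive: owner may read and/or write but never execute,
    # group and others get nothing.
    mode_600_or_more_restrictive(mode) {
        owner := floor(mode / 100) % 10
        group := floor(mode / 10) % 10
        other := mode % 10
        allowed_owner := {0, 2, 4, 6}
        allowed_owner[owner]
        group == 0
        other == 0
    }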
For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" + ], + "rulesNames": [ + "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, proxy file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0181", + "name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the Kubelets.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "validate-kubelet-tls-configuration-updated" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0192", + "name": "Ensure that the cluster has at least one active policy control mechanism in place", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. 
This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", + "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-applied" + ], + "baseScore": 4, + "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", + "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "rulesNames": [ + "immutable-container-filesystem" + ], + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
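As an illustrative sketch of the immutable container filesystem remediation above (C-0017), a Pod can set `readOnlyRootFilesystem: true` and mount an `emptyDir` volume for the few paths the application must write to; the resource names and image below are placeholders, not taken from the control's example file:

```yaml
# Hypothetical Pod sketch; metadata names and the image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: readonly-rootfs-demo
spec:
  containers:
    - name: app
      image: nginx:1.25
      securityContext:
        readOnlyRootFilesystem: true   # the field this control checks
      volumeMounts:
        - name: tmp
          mountPath: /tmp              # writable scratch space only where the app needs it
  volumes:
    - name: tmp
      emptyDir: {}
```

Keeping the writable surface limited to explicit volume mounts lets the check pass while the application still has scratch space.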
", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0144", + "name": "Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "description": "Activate garbage collector on pod termination, as appropriate.", + "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "rulesNames": [ + "internal-networking" + ], + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0170", + "name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" + ], + "rulesNames": [ + "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0143", + "name": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers" + ], + "baseScore": 5, + "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0199", + "name": "Minimize the admission of containers with the NET_RAW capability", + "description": "Do not generally permit containers with the potentially 
dangerous NET\\_RAW capability.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 6, + "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0200", + "name": "Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-restricted-applied" + ], + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, there are no restrictions on adding capabilities to containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0152", + "name": "Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1" + ], + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "rulesNames": [ + "rule-access-dashboard", + "rule-access-dashboard-subject-v1", + "rule-access-dashboard-wl-v1" + ], + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0142", + "name": "Ensure that encryption providers are appropriately configured", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-encryption-providers-are-appropriately-configured" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no encryption provider is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0205", + "name": "Ensure that the CNI in use supports Network Policies", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. 
As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-cni-in-use-supports-network-policies" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-can-create-pod" + ], + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Malicious admission controller (validating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. 
This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", + "rulesNames": [ + "list-all-validating-webhooks" + ], + "controlID": "C-0036", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0115", + "name": "Ensure that the API Server --DenyServiceExternalIPs is not set", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set" + ], + "baseScore": 4, + "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", + "default_value": "By default, the `DenyServiceExternalIPs` admission plugin is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "No impersonation", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. 
If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "rulesNames": [ + "rule-can-impersonate-users-groups", + "rule-can-impersonate-users-groups-v1" + ], + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0133", + "name": "Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0105", + "name": "Ensure that the admin.conf file ownership is set to root:root", + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" + ], + "rulesNames": [ + "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0249", + "name": "Restrict untrusted workloads", + "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review" + }, + "rulesNames": [ + "rule-manual" + ], + "baseScore": 5, + "impact_statement": "", + "default_value": "ACI is not a default component of AKS", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0163", + "name": "Ensure that the kubelet service file ownership is set to root:root", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "rulesNames": [ + "automount-service-account" + ], + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0254", + "name": "Enable audit Logs", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. 
Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-manual" + ], + "baseScore": 5, + "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", + "default_value": "By default, cluster control plane logs aren't sent to be Logged.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0203", + "name": "Minimize the admission of HostPath volumes", + "description": "Do not generally admit containers which make use of `hostPath` volumes.", + "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. 
The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 6, + "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0492-cgroups-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", + "rulesNames": [ + "CVE-2022-0492" + ], + "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. 
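For the Pod Security Admission based controls above (for example C-0192, C-0199 and C-0203), the namespace labels that the checks look for might resemble this sketch; the namespace name and the chosen levels are illustrative assumptions, not values prescribed by the controls:

```yaml
# Hypothetical namespace with Pod Security Admission labels.
apiVersion: v1
kind: Namespace
metadata:
  name: team-a
  labels:
    pod-security.kubernetes.io/enforce: baseline        # enforce the baseline profile in this namespace
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/warn: restricted         # additionally warn on violations of the restricted profile
```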
This control identifies all the resources that deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", + "controlID": "C-0086", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0229", + "name": "Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks" + ], + "baseScore": 8.0, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Workloads with Critical vulnerabilities exposed to external traffic", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", + "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outside traffic. If no fix is available, consider periodic restart of the POD to minimize the risk of persistent intrusion. Use the exception mechanism if you don't want to see this report again.", + "rulesNames": [ + "exposed-critical-pods" + ], + "long_description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. 
This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service assigned to them.", + "test": "This control enumerates external facing workloads, that have LoadBalancer or NodePort services and checks image vulnerability information to see if the image has critical vulnerabilities.", + "controlID": "C-0083", + "baseScore": 8.0, + "example": "@controls/examples/c83.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0114", + "name": "Ensure that the API Server --token-auth-file parameter is not set", + "description": "Do not use token based authentication.", + "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-token-auth-file-parameter-is-not-set" + ], + "baseScore": 8, + "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication could not be used.", + "default_value": "By default, `--token-auth-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Ensure that default service accounts are not actively used", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "automount-default-service-account", + "namespace-without-service-account" + ], + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0215", + "name": "Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + 
"https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-deny-hostipc" + ], + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-streaming-connection-idle-timeout" + ], + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0099", + "name": "Ensure that the etcd pod specification file ownership is set to root:root", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" + ], + "rulesNames": [ + "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0241", + "name": "Use Azure RBAC for Kubernetes Authorization.", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", + "remediation": "Set Azure RBAC as access system.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-azure-rbac-is-set" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in the default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in the default namespace for the user to review and approve.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pods-in-default-namespace", + "resources-rbac-in-default-namespace", + "resources-core1-in-default-namespace", + "resources-core2-in-default-namespace", + "resources-other1-in-default-namespace", + "resources-other2-in-default-namespace", + "resources-secret-in-default-namespace", + "resources-event-in-default-namespace" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0138", + "name": "Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0235", + "name": "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. 
Verify that the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive" + ], + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Forbidden Container Registries", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial Access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credentials can lead to cluster takeover. Attackers may abuse cloud account credentials or the IAM mechanism to gain access to the cluster\u2019s management layer.", + "remediation": "Limit the registries from which you pull container images", + "rulesNames": [ + "rule-identify-blocklisted-image-registries" + ], + "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", + "test": "Checks the image in the pod spec; if the image registry is in the list of blocked registries, an alert is raised.", + "controlID": "C-0001", + "baseScore": 7.0, + "example": "@controls/examples/c001.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "rulesNames": [ + "container-image-repository" + ], + "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. 
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0102", + "name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" + ], + "rulesNames": [ + "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory has permissions of `755`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "rulesNames": [ + "Symlink-Exchange-Can-Allow-Host-Filesystem-Access" + ], + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0222", + "name": "Minimize user access to Amazon ECR", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. 
You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-aws-policies-are-present" + ], + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Defense evasion" + ] + } + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "rulesNames": [ + "rule-can-delete-k8s-events", + "rule-can-delete-k8s-events-v1" + ], + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0147", + "name": "Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate" + ], + "baseScore": 6, + "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-private-key-file` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "Attackers with the relevant RBAC permission can use the \u201ckubectl port-forward\u201d command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit the \u201ckubectl port-forward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rulesNames": [ + "rule-can-portforward", + "rule-can-portforward-v1" + ], + "long_description": "Attackers who have the relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl port-forward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to port-forward into pods \u2013 i.e., whether they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposure to internet", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Workload Exposure" + ] + }, + { + "attackTrack": "", + "categories": [ + "" + ] + } + ] + }, + "description": "This control detects workloads that are exposed to the Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rulesNames": [ + "exposure-to-internet" + ], + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0247", + "name": "Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "restrict-access-to-the-control-plane-endpoint" + ], + "baseScore": 8, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", + "default_value": "By default, Endpoint Private Access is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0248", + "name": "Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-clusters-are-created-with-private-nodes" + ], + "baseScore": 8, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0246", + "name": "Avoid use of system:masters group", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", + "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", + "references": [ + "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-manual" + ], + "baseScore": 8, + "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", + "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0239", + "name": "Prefer using dedicated AKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. 
Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-default-service-accounts-has-only-default-roles" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0153", + "name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", + "description": "Configure TLS encryption for the etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "etcd-tls-enabled" + ], + "baseScore": 8, + "impact_statement": "Client connections only over TLS would be served.", + "default_value": "By default, TLS encryption is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods that run as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", + "rulesNames": [ + "non-root-containers" + ], + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0213", + "name": "Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-deny-privileged-container" + ], + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "rulesNames": [ + "linux-hardening" + ], + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0132", + "name": "Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "description": "Retain 10 or an appropriate number of old log files.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0244", + "name": "Ensure Kubernetes Secrets are encrypted", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. 
Additionally, organizations have various options to closely manage encryption or encryption keys.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "secret-etcd-encryption-cloud" + ], + "baseScore": 6, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "automount-service-account" + ], + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0127", + "name": "Ensure that the admission control plugin NodeRestriction is set", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-NodeRestriction-is-set" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `NodeRestriction` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0183", + "name": "Verify that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in the `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on the kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-rotate-kubelet-server-certificate" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet server certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0117", + "name": "Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "description": "Verify kubelet's certificate before establishing connection.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0141", + "name": "Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "description": "Encrypt etcd key-value store.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `--encryption-provider-config` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0198", + "name": "Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-restricted-applied" + ], + "baseScore": 6, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0093", + "name": "Ensure that the API server pod specification file ownership is set to root:root", + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" + ], + "rulesNames": [ + "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0169", + "name": "Ensure that the client certificate authorities file ownership is set to root:root", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" + ], + "rulesNames": [ + "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0123", + "name": "Ensure that the admission control plugin AlwaysPullImages is set", + "description": "Always pull images.", + "long_description": "Setting the admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster, users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set" + ], + "baseScore": 4, + "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", + "default_value": "By default, `AlwaysPullImages` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0116", + "name": "Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "description": "Enable certificate based kubelet authentication.", + "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. 
You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate" + ], + "baseScore": 7, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, certificate-based kubelet authentication is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0193", + "name": "Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 8, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of privileged containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. 
Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "rulesNames": [ + "K8s common labels usage" + ], + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0148", + "name": "Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate" + ], + "baseScore": 7, + "impact_statement": "You need to setup and maintain root certificate authority file.", + "default_value": "By default, `--root-ca-file` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0204", + "name": "Minimize the admission of containers which use HostPorts", + "description": "Do not generally permit containers which require the use of HostPorts.", + "long_description": "Host ports connect containers directly to the host's network. 
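A small verification sketch for C-0148, assuming kubeadm default paths and that openssl is available on the node; it extracts the `--root-ca-file` value from the running controller manager and checks the referenced file is readable as a certificate:

```
# Extract and sanity-check the --root-ca-file value (C-0148).
root_ca=$(ps -ef | grep '[k]ube-controller-manager' | tr ' ' '\n' | grep -- '--root-ca-file=' | cut -d= -f2)
if [ -n "$root_ca" ] && openssl x509 -noout -in "$root_ca" 2>/dev/null; then
  echo "OK: --root-ca-file=$root_ca"
else
  echo "FAIL: --root-ca-file missing or unreadable"
fi
```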
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 4, + "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the use of HostPorts.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "rulesNames": [ + "host-pid-ipc-privileges" + ], + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
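As an illustration of what C-0204 is looking for, a query sketch that lists pods declaring `hostPort` on any container (assumes kubectl and jq are available; the output format is an assumption):

```
# List pods that declare hostPort on a container or initContainer (C-0204).
kubectl get pods -A -o json | jq -r '
  .items[]
  | select([.spec.containers[]?, .spec.initContainers[]?] | map(.ports[]?) | any(has("hostPort")))
  | "\(.metadata.namespace)/\(.metadata.name)"'
```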
This control identifies all PODs using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0245", + "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "encrypt-traffic-to-https-load-balancers-with-tls-certificates" + ], + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "ServiceAccount token mounted", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", + "rulesNames": [ + "serviceaccount-token-mount" + ], + "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", + "controlID": "C-0261", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0113", + "name": "Ensure that the API Server --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the API server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. 
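A hedged illustration of the C-0261 remediation: opting a workload out of automatic ServiceAccount token mounting. The names `demo-pod` and `demo-sa` are hypothetical placeholders.

```
# Disable automatic token mounting at the workload level (C-0261).
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: demo-pod
spec:
  serviceAccountName: demo-sa
  automountServiceAccountToken: false
  containers:
  - name: app
    image: registry.k8s.io/pause:3.9
EOF
```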
However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false" + ], + "baseScore": 8, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Workload with ConfigMap access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Access" + ] + } + ] + }, + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these ConfigMaps. Remove ConfigMap access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rulesNames": [ + "workload-mounted-configmap" + ], + "test": "Check if any workload has mounted ConfigMaps by inspecting its specification and verifying whether ConfigMap volumes are defined", + "controlID": "C-0258", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting a host directory into the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "rulesNames": [ + "alert-rw-hostpath" + ], + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
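A query sketch in the spirit of C-0258, listing pods that mount ConfigMaps as volumes (envFrom references are not covered; assumes kubectl and jq):

```
# List pods with ConfigMap volumes (C-0258).
kubectl get pods -A -o json | jq -r '
  .items[]
  | select(any(.spec.volumes[]?; has("configMap")))
  | "\(.metadata.namespace)/\(.metadata.name)"'
```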
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0118", + "name": "Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not always authorize all requests.", + "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow" + ], + "baseScore": 7, + "impact_statement": "Only authorized requests will be served.", + "default_value": "By default, `AlwaysAllow` is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0233", + "name": "Consider Fargate for running untrusted workloads", + "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", + "long_description": "", + "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. 
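For C-0045, a rough query sketch that flags containers mounting a hostPath volume without `readOnly: true` (assumes kubectl and jq; the output wording is illustrative and duplicates may appear for multiple matching mounts):

```
# Flag writable hostPath mounts (C-0045).
kubectl get pods -A -o json | jq -r '
  .items[] as $pod
  | $pod.spec.volumes[]?
  | select(has("hostPath")) as $vol
  | $pod.spec.containers[]
  | .volumeMounts[]?
  | select(.name == $vol.name and (.readOnly != true))
  | "\($pod.metadata.namespace)/\($pod.metadata.name): \($vol.hostPath.path) is mounted read-write"'
```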
If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * list text hereFor Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "alert-fargate-not-in-use" + ], + "baseScore": 3, + "impact_statement": "", + "default_value": "By default, AWS Fargate is not utilized.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Mount service principal", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "rulesNames": [ + "alert-mount-potential-credentials-paths" + ], + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0214", + "name": "Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-deny-hostpid" + ], + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0243", + "name": "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider" + ], + "baseScore": 5, + "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. 
Or, a network issue might prevent you from connecting to the registry.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0250", + "name": "Minimize cluster access to read-only for Azure Container Registry (ACR)", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-service-principle-has-read-only-permissions" + ], + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-can-list-get-secrets-v1" + ], + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0135", + "name": "Ensure that the API Server --service-account-lookup argument is set to true", + "description": "Validate service account before validating token.", + 
"long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. This is an example of time of check to time of use security issue.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `--service-account-lookup` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). 
Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-authorization-mode-alwaysAllow" + ], + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Network mapping", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "rulesNames": [ + "internal-networking" + ], + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0225", + "name": "Prefer using dedicated EKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. 
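A simple sketch matching the C-0049 test ("check for each namespace if there is a network policy defined"); assumes kubectl access and that the cluster uses the standard NetworkPolicy API:

```
# Print namespaces in which no NetworkPolicy is defined (C-0049).
for ns in $(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'); do
  if [ "$(kubectl get networkpolicies -n "$ns" --no-headers 2>/dev/null | wc -l)" -eq 0 ]; then
    echo "no NetworkPolicy in namespace: $ns"
  fi
done
```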
The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see list text hereEnabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", + "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-default-service-accounts-has-only-default-roles", + "automount-default-service-account" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rulesNames": [ + "exec-into-container", + "exec-into-container-v1" + ], + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CoreDNS poisoning", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral Movement" + ], + "controlTypeTags": [ + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", + "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", + "rulesNames": [ + "rule-can-update-configmap", + "rule-can-update-configmap-v1" + ], + "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", + "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", + "controlID": "C-0037", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "rulesNames": [ + "sudo-in-container-entrypoint" + ], + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
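Related to C-0037, a one-off way to test whether a particular subject can modify the coredns ConfigMap; the service account name below is hypothetical.

```
# Can this subject update the coredns ConfigMap? (C-0037)
kubectl auth can-i update configmap/coredns -n kube-system \
  --as system:serviceaccount:default:demo-sa
```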
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0134", + "name": "Ensure that the API Server --request-timeout argument is set as appropriate", + "description": "Set global request timeout for API server requests as appropriate.", + "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--request-timeout` is set to 60 seconds.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0151", + "name": "Ensure that the Scheduler --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
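For C-0062, a query sketch that lists containers whose command or args mention `sudo` (assumes kubectl and jq; this only inspects the pod spec, not ENTRYPOINT baked into the image):

```
# List containers with sudo in their command or args (C-0062).
kubectl get pods -A -o json | jq -r '
  .items[] as $pod
  | $pod.spec.containers[]
  | select(((.command // []) + (.args // [])) | join(" ") | test("\\bsudo\\b"))
  | "\($pod.metadata.namespace)/\($pod.metadata.name)/\(.name)"'
```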
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-scheduler-profiling-argument-is-set-to-false" + ], + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0157", + "name": "Ensure that the --peer-client-cert-auth argument is set to true", + "description": "etcd should be configured for peer authentication.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", + "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "etcd-peer-client-auth-cert" + ], + "baseScore": 7, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0101", + "name": "Ensure that the Container Network Interface file ownership is set to root:root", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
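A verification sketch for C-0157 on an etcd node (relevant to multi-member clusters only; assumes the flag is passed on the command line rather than via an etcd config file):

```
# Check that peer client certificate authentication is enabled (C-0157).
if ps -ef | grep '[e]tcd' | grep -q -- '--peer-client-cert-auth=true'; then
  echo "OK: peer client certificate authentication is enabled"
else
  echo "FAIL: --peer-client-cert-auth=true not found"
fi
```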
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "SSH server running inside container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "rulesNames": [ + "rule-can-ssh-to-pod", + "rule-can-ssh-to-pod-v1" + ], + "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "rulesNames": [ + "nginx-ingress-snippet-annotation-vulnerability" + ], + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
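In the spirit of the C-0042 test, a query sketch that lists Services exposing the typical SSH ports 22 or 2222 (assumes kubectl and jq; string-valued targetPorts are not matched):

```
# List Services exposing port 22 or 2222 (C-0042).
kubectl get services -A -o json | jq -r '
  .items[]
  | select(any(.spec.ports[]?; .port == 22 or .port == 2222 or .targetPort == 22 or .targetPort == 2222))
  | "\(.metadata.namespace)/\(.metadata.name)"'
```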
The file should be owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" + ], + "rulesNames": [ + "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0124", + "name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", + "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used" + ], + "baseScore": 4, + "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", + "default_value": "By default, `SecurityContextDeny` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Naked PODs", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
This control identifies every POD that does not have a corresponding parental object.", + "remediation": "Create a Deployment object for every POD, making every POD a first-class citizen in your IaC architecture.", + "rulesNames": [ + "naked-pods" + ], + "long_description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", + "test": "Test if PODs are associated with a Deployment, ReplicaSet, etc. If not, fail.", + "controlID": "C-0073", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0150", + "name": "Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1" + ], + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno that enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to 1.8.5 or above", + "rulesNames": [ + "CVE-2022-47633" + ], + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno that enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification could be bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. 
This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting image pulls to trusted repositories.", + "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0194", + "name": "Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Access container service account", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with the Kubernetes API server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", + "rulesNames": [ + "access-container-service-account", + "access-container-service-account-v1" + ], + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Instance Metadata API", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Discovery", + "Impact - service access" + ] + } + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "rulesNames": [ + "instance-metadata-api-access" + ], + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0103", + "name": "Ensure that the etcd data directory ownership is set to etcd:etcd", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
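A hedged probe in the spirit of C-0052, run from inside a pod: a response from any of these well-known endpoints means the pod can reach the node's cloud instance metadata service. Timeouts and messages are illustrative.

```
# Probe cloud instance metadata services from a pod (C-0052).
curl -s --max-time 3 http://169.254.169.254/latest/meta-data/ || echo "AWS IMDS not reachable"
curl -s --max-time 3 -H "Metadata: true" \
  "http://169.254.169.254/metadata/instance?api-version=2021-02-01" || echo "Azure IMDS not reachable"
curl -s --max-time 3 -H "Metadata-Flavor: Google" \
  http://metadata.google.internal/computeMetadata/v1/ || echo "GCP metadata not reachable"
```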
For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "manual_test": "Review the pod definitions in your cluster. It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "set-seccomp-profile-RuntimeDefault" + ], + "baseScore": 4, + "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", + "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "rulesNames": [ + "CVE-2022-39328" + ], + "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. 
The CVSS score for this vulnerability is 9.8 Critical.", + "test": "This control test for vulnerable versions of Grafana (between 9.2 and 9.2.3)", + "controlID": "C-0090", + "baseScore": 9.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Resources CPU limit and request", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU limit is not set.", + "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-cpu-limit-and-request" + ], + "controlID": "C-0050", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "rulesNames": [ + "CVE-2022-0185" + ], + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-list-all-cluster-admins-v1" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0104", + "name": "Ensure that the admin.conf file permissions are set to 600", + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" + ], + "rulesNames": [ + "ensure-that-the-admin.conf-file-permissions-are-set-to-600" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, admin.conf has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0251", + "name": "Minimize user access to Azure Container Registry (ACR)", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. 
For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "list-role-definitions-in-acr" + ], + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workloads with excessive amount of vulnerabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Container images with multiple Critical and High severity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threshold provided by the customer.", + "remediation": "Update your workload images as soon as possible when fixes become available.", + "rulesNames": [ + "excessive_amount_of_vulnerabilities_pods" + ], + "long_description": "Container images with multiple Critical and High severity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threshold provided by the customer.", + "test": "This control enumerates workloads and checks if they have an excessive amount of vulnerabilities in their container images. The threshold of \u201cexcessive number\u201d is configurable.", + "controlID": "C-0085", + "baseScore": 6.0, + "example": "@controls/examples/c85.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0226", + "name": "Prefer using a container-optimized OS when possible", + "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small, secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", + "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like a locked-down firewall are configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", + "remediation": "", + "manual_test": "If a container-optimized OS is required, examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", + "references": [ + "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", + "https://aws.amazon.com/bottlerocket/" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "alert-container-optimized-os-not-in-use" + ], + "baseScore": 3, + "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. 
ssh) may not be possible, with access and debugging being intended via a management tool.", + "default_value": "A container-optimized OS is not the default.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "rulesNames": [ + "resource-policies" + ], + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rulesNames": [ + "rule-credentials-in-env-var", + "rule-credentials-configmap" + ], + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0221", + "name": "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.2. Open the Amazon ECR console at.\n3. From the navigation bar, choose the Region to create your repository in.\n4. In the navigation pane, choose Repositories.\n5. On the Repositories page, choose the repository that contains the image to scan.\n6. On the Images page, select the image to scan and then choose Scan.", + "manual_test": "Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-image-scanning-enabled-cloud" + ], + "baseScore": 5, + "impact_statement": "If you are utilizing AWS ECR The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageErrorYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returnedYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-event-qps" + ], + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "By default, `--event-qps` argument is set to `5`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0129", + "name": "Ensure that the API Server --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-profiling-argument-is-set-to-false" + ], + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0161", + "name": "Ensure that the audit policy covers key security concerns", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "audit-policy-content" + ], + "baseScore": 5, + "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", + "default_value": "By default Kubernetes clusters do not log audit information.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. 
Use NodePort / ClusterIP instead.", + "rulesNames": [ + "container-hostPort" + ], + "long_description": "This control identifies workloads (like pod, deployment, etc.) that contain a container with hostPort. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.", + "test": "Check for each workload (with container) whether hostPort is defined inside the container.", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0195", + "name": "Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0108", + "name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" + ], + "rulesNames": [ + "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0165", + "name": "If proxy kubeconfig file exists ensure ownership is set to root:root", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" + ], + "rulesNames": [ + "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `proxy` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "rulesNames": [ + "secret-etcd-encryption-cloud", + "etcd-encryption-native" + ], + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0139", + "name": "Ensure that the API Server --client-ca-file argument is set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0197", + "name": "Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-restricted-applied" + ], + "baseScore": 6, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "rulesNames": [ + "insecure-port-flag" + ], + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend blocking them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0120", + "name": "Ensure that the API Server --authorization-mode argument includes RBAC", + "description": "Turn on Role Based Access Control.", + "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
It is recommended to use the RBAC authorization mode.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC" + ], + "baseScore": 8, + "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", + "default_value": "By default, `RBAC` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0237", + "name": "Check if signature exists", + "description": "Ensures that all images contain some signature", + "long_description": "Verifies that each image is signed", + "remediation": "Replace the image with a signed image", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "has-image-signature" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "rulesNames": [ + "psp-enabled-cloud", + "psp-enabled-native" + ], + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0236", + "name": "Verify image signature", + "description": "Verifies the signature of each image with given public keys", + "long_description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "manual_test": "", + "references": [], + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true + }, + "rulesNames": [ + "verify-image-signature" + ], + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0216", + "name": "Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-deny-hostnetwork" + ], + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workloads with RCE vulnerabilities exposed to external traffic", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their POD has either LoadBalancer or NodePort service.", + "remediation": "Either update the container image to fix the vulnerabilities (if such a fix is available) or reassess if this workload must be exposed to the outside traffic. 
If no fix is available, consider periodic restart of the POD to minimize the risk of persistent intrusion. Use the exception mechanism if you don't want to see this report again.", + "rulesNames": [ + "exposed-rce-pods" + ], + "long_description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their POD has either LoadBalancer or NodePort service.", + "test": "This control enumerates external facing workloads that have LoadBalancer or NodePort service and checks the image vulnerability information for the RCE vulnerability.", + "controlID": "C-0084", + "baseScore": 8.0, + "example": "@controls/examples/c84.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0158", + "name": "Ensure that the --peer-auto-tls argument is not set to true", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "etcd-peer-auto-tls-disabled" + ], + "baseScore": 6, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Configured readiness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "rulesNames": [ + "configured-readiness-probe" + ], + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
This control finds all the PODs where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "anonymous-requests-to-kubelet-service-updated" + ], + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0121", + "name": "Ensure that the admission control plugin EventRateLimit is set", + "description": "Limit the rate at which the API server accepts requests.", + "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-EventRateLimit-is-set" + ], + "baseScore": 4, + "impact_statement": "You need to carefully tune in limits as per your environment.", + "default_value": "By default, `EventRateLimit` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0154", + "name": "Ensure that the --client-cert-auth argument is set to true", + "description": "Enable client authentication on etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "etcd-client-auth-cert" + ], + "baseScore": 8, + "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", + "default_value": "By default, the etcd service can be queried by unauthenticated clients.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0094", + "name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" + ], + "rulesNames": [ + "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0128", + "name": "Ensure that the API Server --secure-port argument is not set to 0", + "description": "Do not disable the secure port.", + "long_description": "The secure port is used to serve https with authentication and authorization. If you disable it, no https traffic is served and all traffic is served unencrypted.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0" + ], + "baseScore": 8, + "impact_statement": "You need to set the API Server up with the right TLS certificates.", + "default_value": "By default, port 6443 is used as the secure port.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0131", + "name": "Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "description": "Retain the logs for at least 30 days or as appropriate.", + "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. 
Set your audit log retention period to 30 days or as per your business requirements.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate" + ], + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Missing network policy", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Network" + ] + } + ] + }, + "description": "This control detects workloads that have no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have the necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "rulesNames": [ + "ensure_network_policy_configured_in_labels" + ], + "test": "Check that all workloads have a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0218", + "name": "Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp <name> -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-deny-root-container" + ], + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0231", + "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-https-loadbalancers-encrypted-with-tls-aws" + ], + "baseScore": 5.0, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0110", + "name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
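As a sketch of the C-0218 remediation, the PSP below sets `runAsUser.rule: MustRunAsNonRoot`; the policy name is hypothetical, the other required rule fields are left permissive for brevity, and note that PodSecurityPolicy is removed in Kubernetes v1.25+, where Pod Security admission would be used instead.

```
# Minimal sketch of a PSP that refuses UID 0 containers (hypothetical name).
kubectl apply -f - <<EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-nonroot
spec:
  privileged: false
  runAsUser:
    rule: MustRunAsNonRoot   # containers must not run as root
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
    - configMap
    - secret
    - emptyDir
    - projected
    - downwardAPI
    - persistentVolumeClaim
EOF
```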
For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" + ], + "rulesNames": [ + "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it are set to be owned by the root user.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Containers mounting Docker socket", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", + "remediation": "Remove the Docker socket mount request or define an exception.", + "rulesNames": [ + "containers-mounting-docker-socket" + ], + "long_description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", + "test": "Check the hostPath. If the path is set to /var/run/docker.sock or /var/lib/docker, the container has access to Docker internals - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0168", + "name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. 
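To make the C-0074 check concrete, this is a sketch of the kind of Pod spec the rule fails: a hostPath volume pointing at `/var/run/docker.sock`. All names are hypothetical and the manifest is shown only as an example of what to avoid.

```
# Example of a Pod that C-0074 would flag: it hostPath-mounts the Docker socket.
cat <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: docker-socket-demo
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - name: docker-sock
          mountPath: /var/run/docker.sock
  volumes:
    - name: docker-sock
      hostPath:
        path: /var/run/docker.sock   # this hostPath is what the rule detects
        type: Socket
EOF
```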
The file should be writable by only the administrators on the system.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" + ], + "rulesNames": [ + "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0146", + "name": "Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "description": "Use individual service account credentials for each controller.", + "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true" + ], + "baseScore": 4, + "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", + "default_value": "By default, `--use-service-account-credentials` is set to false.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0252", + "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and a public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network, then create a link between your virtual network and a new private DNS zone", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled" + ], + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "rulesNames": [ + "anonymous-requests-to-kubelet-service-updated" + ], + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "anonymous-access-enabled" + ], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "rulesNames": [ + "rule-list-all-cluster-admins", + "rule-list-all-cluster-admins-v1" + ], + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
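A quick way to look for the RBAC bindings that C-0262 is concerned with, assuming `kubectl` access to the cluster; the `-o wide` output includes the USERS and GROUPS columns, so a simple grep is enough for a first pass.

```
# List bindings whose subjects include the anonymous user or unauthenticated group.
kubectl get clusterrolebindings -o wide | grep -E 'system:(anonymous|unauthenticated)' \
  || echo "no matching ClusterRoleBindings"
kubectl get rolebindings --all-namespaces -o wide | grep -E 'system:(anonymous|unauthenticated)' \
  || echo "no matching RoleBindings"
```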
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "external-secret-storage" + ], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed externally via services.", + "remediation": "Consider blocking external interfaces or protecting them with appropriate security tools.", + "rulesNames": [ + "exposed-sensitive-interfaces", + "exposed-sensitive-interfaces-v1" + ], + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type NodePort/LoadBalancer pointing to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, Kubernetes dashboard) exists. Requires user configuration.", + "controlID": "C-0021", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", + "rulesNames": [ + "image-pull-policy-is-not-set-to-always" + ], + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. 
If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like latest. For example, Redis uses redis:alpine to signify the latest image. Therefore, this control treats any tag that does not contain digits as latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0217", + "name": "Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privilege escalation is enabled:\n\n \n```\nkubectl get psp <name> -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-deny-allowprivilegeescalation" + ], + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0149", + "name": "Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. 
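As a sketch of the C-0075 fix, the Pod below keeps a `latest`-style tag but pins `imagePullPolicy: Always`, so the kubelet re-checks the image digest against the registry on every start; the image and names are hypothetical.

```
cat <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: web-latest-demo
spec:
  containers:
    - name: web
      image: nginx:latest        # tag without digits is treated as "latest"
      imagePullPolicy: Always    # forces a registry digest check on every start
EOF
```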
This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault), then you need to take care of rotation yourself.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0125", + "name": "Ensure that the admission control plugin ServiceAccount is set", + "description": "Automate service account management.", + "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-ServiceAccount-is-set" + ], + "baseScore": 3, + "impact_statement": "None.", + "default_value": "By default, `ServiceAccount` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allows a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators.", + "long_description": "The impersonate privilege allows a subject to impersonate other users, gaining their rights to the cluster. 
The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-can-bind-escalate", + "rule-can-impersonate-users-groups-v1" + ], + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0202", + "name": "Minimize the admission of Windows HostProcess Containers", + "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 7, + "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. 
Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "cluster-admin-role" + ], + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default, a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0162", + "name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kubelet` service file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace, all traffic will be allowed into and out of the pods in that namespace.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "internal-networking" + ], + "baseScore": 4, + "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", + "rulesNames": [ + "label-usage-for-resources" + ], + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. 
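To complement the per-namespace check in C-0206, a minimal sketch of a default-deny ingress policy; once it is applied to a namespace, only traffic explicitly allowed by other policies in that namespace is accepted (the namespace name is a placeholder).

```
# Default-deny ingress for every pod in a (hypothetical) namespace.
kubectl apply -n my-namespace -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}        # empty selector matches all pods in the namespace
  policyTypes:
    - Ingress            # no ingress rules listed, so all ingress is denied
EOF
```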
These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "The test checks whether a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0111", + "name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" + ], + "rulesNames": [ + "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-protect-kernel-defaults" + ], + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "By default, `--protect-kernel-defaults` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-secrets-in-env-var" + ], + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Resources memory limit and request", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "This control identifies all Pods for which the memory limit is not set.", + "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", + "rulesNames": [ + "resources-memory-limit-and-request" + ], + "controlID": "C-0004", + "example": "@controls/examples/c004.yaml", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "description": "The API server allows an aggregated API to redirect client traffic to any URL. 
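A sketch of the C-0207 recommendation: the same credential consumed as a mounted file rather than an environment variable, so it does not leak into environment dumps or crash logs; the Secret and path names are hypothetical.

```
cat <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: secret-as-file-demo
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - name: db-credentials
          mountPath: /etc/secrets   # app reads /etc/secrets/password at runtime
          readOnly: true
  volumes:
    - name: db-credentials
      secret:
        secretName: db-credentials  # hypothetical pre-existing Secret
EOF
```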
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties.", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patch versions): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "rulesNames": [ + "CVE-2022-3172" + ], + "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties.", + "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", + "controlID": "C-0089", + "baseScore": 3.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Workload with secret access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rulesNames": [ + "workload-mounted-secrets" + ], + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", + "controlID": "C-0255", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of a container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "rulesNames": [ + "insecure-capabilities" + ], + "long_description": "Giving insecure and unnecessary capabilities to a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0242", + "name": "Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, make exploits more difficult. For true security when running hostile multi-tenant workloads, only trust a hypervisor. 
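To illustrate the C-0046 remediation, a container securityContext that drops every capability and adds back only what the workload strictly needs; `NET_BIND_SERVICE` is just an example of a narrowly scoped add, not a requirement of the control.

```
cat <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: minimal-caps-demo
spec:
  containers:
    - name: app
      image: nginx
      securityContext:
        capabilities:
          drop: ["ALL"]               # start from an empty capability set
          add: ["NET_BIND_SERVICE"]   # add back only what is strictly required
EOF
```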
The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-hostile-multitenant-workloads" + ], + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "name": "Workload with credential access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rulesNames": [ + "rule-credentials-in-env-var" + ], + "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", + "controlID": "C-0259", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0112", + "name": "Ensure that the Kubernetes PKI key file permissions are set to 600", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" + ], + "rulesNames": [ + "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0230", + "name": "Ensure Network Policy is Enabled and set as appropriate", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", + "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. 
Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", + "remediation": "", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-network-policy-is-enabled-eks" + ], + "baseScore": 6.0, + "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0240", + "name": "Ensure Network Policy is Enabled and set as appropriate", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-cni-enabled-aks" + ], + "baseScore": 6, + "impact_statement": "Network Policy requires the Network Policy add-on. 
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0136", + "name": "Ensure that the API Server --service-account-key-file argument is set as appropriate", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate" + ], + "baseScore": 5, + "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-key-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "rulesNames": [ + "CVE-2022-24348" + ], + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. 
Exploiting it enables attackers to obtain sensitive information like credentials, secrets and API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movement and information disclosure.", + "test": "Checks the Argo CD version deployed in the cluster; if it is below the fixed versions (v2.1.9, v2.2.4 or v2.3.0), it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0137", + "name": "Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate" + ], + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0160", + "name": "Ensure that a minimal audit policy is created", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", + "remediation": "Create an audit policy file for your cluster.", + "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "k8s-audit-logs-enabled-native-cis" + ], + "baseScore": 5, + "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. 
Care should be taken to avoid generating too large volumes of log information as this could impact the available of the cluster nodes.", + "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0096", + "name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" + ], + "rulesNames": [ + "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0145", + "name": "Ensure that the Controller Manager --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-controller-manager-profiling-argument-is-set-to-false" + ], + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0156", + "name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "etcd-peer-tls-enabled" + ], + "baseScore": 7, + "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, peer communication over TLS is not configured.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0119", + "name": "Ensure that the API Server --authorization-mode argument includes Node", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value that includes `Node`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-authorization-mode-argument-includes-Node" + ], + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, `Node` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0182", + "name": "Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-rotate-certificates" + ], + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet client certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Deprecated Kubernetes image registry", + "attributes": { + "armoBuiltin": true + }, + "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "rulesNames": [ + "rule-identify-old-k8s-registry" + ], + "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", + "controlID": "C-0253", + "baseScore": 5.0, + "example": "@controls/examples/c239.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-ip-tables" + ], + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0227", + "name": "Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. 
If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-endpointprivateaccess-is-enabled" + ], + "baseScore": 8.0, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", + "default_value": "By default, Endpoint Public Access is disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "rulesNames": [ + "ingress-and-egress-blocked" + ], + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0097", + "name": "Ensure that the scheduler pod specification file ownership is set to root:root", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" + ], + "rulesNames": [ + "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Data Destruction", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "rulesNames": [ + "rule-excessive-delete-rights", + "rule-excessive-delete-rights-v1" + ], + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", + "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", + "rulesNames": [ + "pods-in-default-namespace" + ], + "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the PODs running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. 
This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", + "rulesNames": [ + "host-network-access" + ], + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0159", + "name": "Ensure that a unique Certificate Authority is used for etcd", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "etcd-unique-ca" + ], + "baseScore": 8, + "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", + "default_value": "By default, no etcd certificate is created and used.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0130", + "name": "Ensure that the API Server --audit-log-path argument is set", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-api-server-audit-log-path-argument-is-set" + ], + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "rulesNames": [ + "alert-any-hostpath" + ], + "controlID": "C-0048", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "rulesNames": [ + "rule-privilege-escalation" + ], + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. 
Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "rule-privilege-escalation", + "immutable-container-filesystem", + "non-root-containers", + "drop-capability-netraw", + "set-seLinuxOptions", + "set-seccomp-profile", + "set-procmount-default", + "set-fsgroup-value", + "set-fsgroupchangepolicy-value", + "set-systctls-params", + "set-supplementalgroups-values" + ], + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0228", + "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "Check for private endpoint access to the Kubernetes API server", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks" + ], + "baseScore": 8.0, + "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", + "default_value": "By default, the Public Endpoint is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0220", + "name": "Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-required-drop-capabilities" + ], + "baseScore": 5.0, + "impact_statement": "Pods with containers that require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0167", + "name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" + ], + "rulesNames": [ + "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Configured liveness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "rulesNames": [ + "configured-liveness-probe" + ], + "long_description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. 
This control finds all the PODs where the Liveness probe is not configured.", + "controlID": "C-0056", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "list-all-namespaces" + ], + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "default_value": "By default, Kubernetes starts with two initial namespaces: 1. `default` - The default namespace for objects with no other namespace2. `kube-system` - The namespace for objects created by the Kubernetes system3. `kube-node-lease` - Namespace used for node heartbeats4. `kube-public` - Namespace used for public information in a cluster", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0095", + "name": "Ensure that the controller manager pod specification file ownership is set to root:root", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" + ], + "rulesNames": [ + "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0122", + "name": "Ensure that the admission control plugin AlwaysAdmit is not set", + "description": "Do not allow all requests.", + "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. Its behavior was equivalent to turning off all admission controllers.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set" + ], + "baseScore": 8, + "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", + "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0107", + "name": "Ensure that the scheduler.conf file ownership is set to root:root", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" + ], + "rulesNames": [ + "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0098", + "name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" + ], + "rulesNames": [ + "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0196", + "name": "Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namesapces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-baseline-applied" + ], + "baseScore": 
5, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation", + "Impact - Data access in container" + ] + } + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "rulesNames": [ + "CVE-2022-23648" + ], + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "Verify that the --read-only-port argument is set to 0", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "read-only-port-enabled-updated" + ], + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "By default, `--read-only-port` is set to `10255/TCP`. 
However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "rulesNames": [ + "enforce-kubelet-client-tls-authentication-updated" + ], + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0184", + "name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-strong-cryptographics-ciphers" + ], + "baseScore": 5, + "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "rulesNames": [ + "k8s-audit-logs-enabled-cloud", + "k8s-audit-logs-enabled-native" + ], + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0166", + "name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" + ], + "rulesNames": [ + "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0109", + "name": "Ensure that the controller-manager.conf file ownership is set to root:root", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" + ], + "rulesNames": [ + "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "RBAC enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access", + "Privilege escalation" + ] + } + ] + }, + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "rulesNames": [ + "rbac-enabled-cloud", + "rbac-enabled-native" + ], + "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0106", + "name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" + ], + "rulesNames": [ + "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Malicious admission controller (mutating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", + "rulesNames": [ + "list-all-mutating-webhooks" + ], + "controlID": "C-0039", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0201", + "name": "Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "pod-security-admission-restricted-applied" + ], + "baseScore": 5, + "impact_statement": "Pods with containers that require capabilities to operate will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0126", + "name": "Ensure that the admission control plugin NamespaceLifecycle is set", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set" + ], + "baseScore": 3, + "impact_statement": "None", + "default_value": "By default, `NamespaceLifecycle` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "rulesNames": [ + "rule-deny-cronjobs" + ], + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0155", + "name": "Ensure that the --auto-tls argument is not set to true", + "description": "Do not use self-signed certificates for TLS.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "etcd-auto-tls-disabled" + ], + "baseScore": 6, + "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", + "default_value": "By default, `--auto-tls` is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0092", + "name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" + ], + "rulesNames": [ + "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Workload with PVC access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Access" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. 
Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rulesNames": [ + "workload-mounted-pvc" + ], + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0223", + "name": "Minimize cluster access to read-only for Amazon ECR", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure_nodeinstancerole_has_right_permissions_for_ecr" + ], + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0238", + "name": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. 
In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive" + ], + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0232", + "name": "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", + "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "review-roles-with-aws-iam-authenticator" + ], + "baseScore": 7, + "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not, they will not be able to access the namespace or deploy.", + "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0234", + "name": "Consider external secret storage", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "manual_test": "Review your secrets management implementation.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "ensure-external-secrets-storage-is-in-use" + ], + "baseScore": 6.0, + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should set up your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "kubelet-hostname-override" + ], + "baseScore": 3, + "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. 
In these environments, this recommendation should not apply.", + "default_value": "By default, `--hostname-override` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0219", + "name": "Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": { + "armoBuiltin": true + }, + "rulesNames": [ + "psp-deny-allowed-capabilities" + ], + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined. 
If a PSP is created 'allowedCapabilities' is set by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } +] \ No newline at end of file diff --git a/releaseDev/default_config_inputs.json b/releaseDev/default_config_inputs.json new file mode 100644 index 000000000..3ad252704 --- /dev/null +++ b/releaseDev/default_config_inputs.json @@ -0,0 +1,140 @@ +{ + "name": "default", + "attributes": { + "armoBuiltin": true + }, + "scope": { + "designatorType": "attributes", + "attributes": {} + }, + "settings": { + "postureControlInputs": { + "imageRepositoryAllowList": [], + "trustedCosignPublicKeys": [], + "insecureCapabilities": [ + "SETPCAP", + "NET_ADMIN", + "NET_RAW", + "SYS_MODULE", + "SYS_RAWIO", + "SYS_PTRACE", + "SYS_ADMIN", + "SYS_BOOT", + "MAC_OVERRIDE", + "MAC_ADMIN", + "PERFMON", + "ALL", + "BPF" + ], + "listOfDangerousArtifacts": [ + "bin/bash", + "sbin/sh", + "bin/ksh", + "bin/tcsh", + "bin/zsh", + "usr/bin/scsh", + "bin/csh", + "bin/busybox", + "usr/bin/busybox" + ], + "publicRegistries": [], + "sensitiveInterfaces": [ + "nifi", + "argo-server", + "weave-scope-app", + "kubeflow", + "kubernetes-dashboard", + "jenkins", + "prometheus-deployment" + ], + "max_critical_vulnerabilities": [ + "5" + ], + "max_high_vulnerabilities": [ + "10" + ], + "sensitiveValuesAllowed": [ + "AllowedValue" + ], + "sensitiveKeyNames": [ + "aws_access_key_id", + "aws_secret_access_key", + "azure_batchai_storage_account", + "azure_batchai_storage_key", + "azure_batch_account", + "azure_batch_key", + "secret", + "key", + "password", + "pwd", + "token", + "jwt", + "bearer", + "credential" + ], + "sensitiveValues": [ + "BEGIN \\w+ PRIVATE KEY", + "PRIVATE KEY", + "eyJhbGciO", + "JWT", + "Bearer", + "_key_", + "_secret_" + ], + "servicesNames": [ + "nifi-service", + "argo-server", + "minio", + "postgres", + "workflow-controller-metrics", + "weave-scope-app", + "kubernetes-dashboard" + ], + "untrustedRegistries": [], + "memory_request_max": [], + "memory_request_min": [], + "memory_limit_max": [], + "memory_limit_min": [], + "cpu_request_max": [], + "cpu_request_min": [], + "cpu_limit_max": [], + "cpu_limit_min": [], + "wlKnownNames": [ + "coredns", + "kube-proxy", + "event-exporter-gke", + "kube-dns", + "17-default-backend", + "metrics-server", + "ca-audit", + "ca-dashboard-aggregator", + "ca-notification-server", + "ca-ocimage", + "ca-oracle", + "ca-posture", + "ca-rbac", + "ca-vuln-scan", + "ca-webhook", + "ca-websocket", + "clair-clair" + ], + "recommendedLabels": [ + "app", + "tier", + "phase", + "version", + "owner", + "env" + ], + "k8sRecommendedLabels": [ + "app.kubernetes.io/name", + "app.kubernetes.io/instance", + "app.kubernetes.io/version", + "app.kubernetes.io/component", + "app.kubernetes.io/part-of", + "app.kubernetes.io/managed-by", + "app.kubernetes.io/created-by" + ] + } + } +} \ No newline at end of file diff --git a/releaseDev/devopsbest.json b/releaseDev/devopsbest.json new file mode 100644 index 000000000..ba2dff28e --- /dev/null +++ b/releaseDev/devopsbest.json @@ -0,0 +1,987 @@ +{ + "name": "DevOpsBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Resources memory limit and request", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ], + "actionRequired": "configuration" + }, + 
"description": "This control identifies all Pods for which the memory limit is not set.", + "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0004", + "example": "@controls/examples/c004.yaml", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-memory-limit-and-request", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.memory_request_max", + "settings.postureControlInputs.memory_request_min", + "settings.postureControlInputs.memory_limit_max", + "settings.postureControlInputs.memory_limit_min" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.memory_request_max", + "name": "memory_request_max", + "description": "Ensure memory max requests are set" + }, + { + "path": "settings.postureControlInputs.memory_request_min", + "name": "memory_request_min", + "description": "Ensure memory min requests are set" + }, + { + "path": "settings.postureControlInputs.memory_limit_max", + "name": "memory_limit_max", + "description": "Ensure memory max limits are set" + }, + { + "path": "settings.postureControlInputs.memory_limit_min", + "name": "memory_limit_min", + "description": "Ensure memory min limits are set" + } + ], + "description": "memory limits and requests are not set.", + "remediation": "Ensure memory limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not have container with memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nrequest_or_limit_memory(container) {\n\tcontainer.resources.limits.memory\n\tcontainer.resources.requests.memory\n}\n\n######################################################################################################\n\n# Fails if pod exceeds memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[wl]},\n\t}\n}\n\n######################################################################################################\n\nis_min_max_exceeded_memory(container) = \"resources.limits.memory\" {\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n} else = \"resouces.requests.memory\" {\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n} else = \"\" {\n\ttrue\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max :=data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + } + ] + }, + { + "name": "Configured readiness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
This control finds all the PODs where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-readiness-probe", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Readiness probe is not configured", + "remediation": "Ensure Readiness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + 
"security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Resources CPU limit and request", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU limit is not set.", + "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0050", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resources-cpu-limit-and-request", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.cpu_request_max", + "settings.postureControlInputs.cpu_request_min", + "settings.postureControlInputs.cpu_limit_min", + "settings.postureControlInputs.cpu_limit_max" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.cpu_request_max", + "name": "cpu_request_max", + "description": "Ensure CPU max requests are set" + }, + { + "path": "settings.postureControlInputs.cpu_request_min", + "name": "cpu_request_min", + "description": "Ensure CPU min requests are set" + }, + { + "path": "settings.postureControlInputs.cpu_limit_max", + "name": "cpu_limit_max", + "description": "Ensure CPU max limits are set" + }, + { + "path": "settings.postureControlInputs.cpu_limit_min", + "name": "cpu_limit_min", + "description": "Ensure CPU min limits are set" + } + ], + "description": "CPU limits and requests are not set.", + "remediation": "Ensure CPU limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not have container with CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 
10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n###################################################################################################################\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v 
exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n \tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n#################################################################################################################3\n\nrequest_or_limit_cpu(container) {\n\tcontainer.resources.limits.cpu\n\tcontainer.resources.requests.cpu\n}\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resouces.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, 
\"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + } + ] + }, + { + "name": "Configured liveness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "controlID": "C-0056", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "configured-liveness-probe", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Liveness probe is not configured", + "remediation": "Ensure Liveness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Pods in default namespace", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", + "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", + "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the PODs running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "pods-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + } + 
] + }, + { + "name": "Naked PODs", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", + "remediation": "Create the necessary Deployment object for every POD, making any POD a first class citizen in your IaC architecture.", + "long_description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", + "test": "Test if PODs are not associated with a Deployment, ReplicaSet, etc. If not, fail.", + "controlID": "C-0073", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "naked-pods", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", + "remediation": "Create the necessary Deployment object for every Pod, making any Pod a first class citizen in your IaC architecture. Example command: kubectl create deployment nginx-depl --image=nginx:1.19", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" + } + ] + }, + { + "name": "Containers mounting Docker socket", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", + "remediation": "Remove the Docker socket mount request or define an exception.", + "long_description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", + "test": "Check hostpath. 
If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "containers-mounting-docker-socket", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n" + } + ] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. 
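The remediation for the Docker socket control above (C-0074) allows either removing the hostPath mount or defining an exception. A minimal sketch of such an exception, using the postureExceptionPolicy shape that appears in the exceptions file added later in this patch; the workload kind, namespace and name are hypothetical placeholders:

    {
      "name": "exclude-example-docker-socket-mount",
      "policyType": "postureExceptionPolicy",
      "actions": [ "alertOnly" ],
      "resources": [
        {
          "designatorType": "Attributes",
          "attributes": {
            "kind": "DaemonSet",
            "namespace": "monitoring",
            "name": "example-agent"
          }
        }
      ],
      "posturePolicies": [
        { "controlID": "C-0074" }
      ]
    }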
Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any tag that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "image-pull-policy-is-not-set-to-always", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "check imagePullPolicy field, if imagePullPolicy = always pass, else fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" + } + ] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined, this is a configurable control. 
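The image-pull-policy rule above flags a container when its image resolves to latest (an explicit :latest tag, no tag at all, or a letters-only tag) while imagePullPolicy is Never or IfNotPresent. A minimal sketch of a container entry that satisfies control C-0075; the container name and image are illustrative placeholders:

    {
      "name": "web",
      "image": "nginx:latest",
      "imagePullPolicy": "Always"
    }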
Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "label-usage-for-resources", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.recommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.recommendedLabels", + "name": "Recommended Labels", + "description": "Kubescape checks that workloads have at least one of the following labels." + } + ], + "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n \n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, 
podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n" + } + ] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "K8s common labels usage", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.k8sRecommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.k8sRecommendedLabels", + "name": "Kubernetes Recommended Labels", + "description": "Kubescape checks that workloads have at least one of the following kubernetes recommended labels." 
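Both label controls only require that at least one of the configured labels is present (settings.postureControlInputs.recommendedLabels for C-0076, k8sRecommendedLabels for C-0077), and the fix path they emit is metadata.labels. A sketch of a metadata block that would pass typical configurations of both; the label keys and values shown are assumptions, since the effective lists come from the posture control inputs:

    {
      "metadata": {
        "name": "example-app",
        "labels": {
          "app": "example-app",
          "tier": "frontend",
          "env": "staging",
          "app.kubernetes.io/name": "example-app",
          "app.kubernetes.io/version": "1.0.0"
        }
      }
    }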
+ } + ], + "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := 
recommended_labels[_]\n\tlabels[recommended_label]\n}\n" + } + ] + }, + { + "name": "Deprecated Kubernetes image registry", + "attributes": { + "armoBuiltin": true + }, + "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", + "controlID": "C-0253", + "baseScore": 5.0, + "example": "@controls/examples/c239.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-identify-old-k8s-registry", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Identifying if pod container images are from deprecated K8s registry", + "remediation": "Use images new registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated 
k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0004", + "C-0018", + "C-0044", + "C-0050", + "C-0056", + "C-0061", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0253" + ] +} \ No newline at end of file diff --git a/releaseDev/exceptions.json b/releaseDev/exceptions.json new file mode 100644 index 000000000..f0ebc1a15 --- /dev/null +++ b/releaseDev/exceptions.json @@ -0,0 +1,6854 @@ +[ + { + "name": "exclude-pod-kube-apiserver", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-apiserver-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0013" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0013 " + }, + { + "controlID": "c-0020" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0034" + }, + { + "controlID": "c-0016" + }, + { + "controlID": "c-0004" + }, + { + "controlID": "c-0050" + }, + { + "controlID": "c-0009" + }, + { + "controlID": "c-0048" + }, + { + "controlID": "c-0041" + } + ] + }, + { + "name": "exclude-eks-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "aws-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "coredns-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-proxy-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "metrics-server-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + 
"designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "aws-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "kube-proxy" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "eventrouter" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "ebs-csi-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "ebs-csi-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "ebs-csi-node-windows" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "coredns-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + 
"designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "metrics-server-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-17", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "aws-cloud-provider" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "aws-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "eks-admin" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "eks-vpc-resource-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-21", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-22", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "tagging-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-23", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + 
"systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "vpc-resource-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-24", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "eventrouter" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-25", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ebs-csi-controller-sa" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-26", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ebs-csi-node-sa" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-27", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:fargate-manager" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-28", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:addon-manager" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-29", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:certificate-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "eks:node-manager" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-eks-resources-31", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Group", + "name": "system:masters" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-kubescape-prometheus-security-context", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + 
"attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-kubescape-prometheus-deployment-allowed-registry", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0001" + }, + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-prometheus-deployment-ingress-and-egress", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + } + ] + }, + { + "name": "exclude-default-namespace-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "name": "kubescape", + "namespace": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-default-namespace-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-default-namespace-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "default", + "namespace": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-otel", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + } + ] + }, + { + "name": "exclude-service-accounts-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + 
"controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage-aggregated-apiserver-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0186" + }, + { + "controlID": "c-0053" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-service-accounts-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0190" + } + ] + }, + { + "name": "exclude-service-accounts-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0190" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": 
"c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "storage", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-security-context-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0055" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + }, + { + "controlID": "c-0058" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0001" + }, + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + 
"namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0001" + }, + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0001" + }, + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0001" + }, + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-allowed-registry-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0001" + }, + { + "controlID": "c-0078" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + } + ] + }, + { + "name": "exclude-kubescape-deployment-ingress-and-egress-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + } + ] + }, + { 
+ "name": "exclude-kubescape-deployment-ingress-and-egress", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0045" + }, + { + "controlID": "c-0046" + }, + { + "controlID": "c-0048" + }, + { + "controlID": "c-0057" + }, + { + "controlID": "c-0013" + }, + { + "controlID": "c-0016" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0034" + }, + { + "controlID": "c-0074" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + } + ] + }, + { + "name": "exclude-ks-service-account", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "ks-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-kubescape-service-account", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubescape-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0007" + }, + { + "controlID": "c-0015" + } + ] + }, + { + "name": "exclude-kubescape-default-service-account", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "default", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0189" + }, + { + "controlID": "c-0190" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "ks-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0053" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubescape-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0053" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0053" + } + ] + }, + { + "name": "exclude-kubescape-service-accounts-4", + "policyType": 
"postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent-service-account", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0034" + }, + { + "controlID": "c-0053" + } + ] + }, + { + "name": "exclude-kubescape-otel", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + } + ] + }, + { + "name": "exclude-kubescape-host-scanner-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "host-scanner", + "namespace": "kubescape-host-scanner" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kubescape-host-scanner-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "host-scanner", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-schedulers-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubevuln-schedule-.*", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": "c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-schedulers-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubescape-registry-scan-.*", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": "c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-schedulers-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubevuln-scheduler", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": 
"c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-schedulers-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubescape-scheduler", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0026" + }, + { + "controlID": "c-0076" + }, + { + "controlID": "c-0077" + }, + { + "controlID": "c-0210" + }, + { + "controlID": "c-0211" + } + ] + }, + { + "name": "exclude-storage-apiserver", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "storage-apiserver", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0030" + }, + { + "controlID": "c-0034" + }, + { + "controlID": "c-0055" + }, + { + "controlID": "c-0056" + }, + { + "controlID": "c-0017" + }, + { + "controlID": "c-0018" + }, + { + "controlID": "c-0076" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "etcd-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-proxy-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-system" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "storage-provisioner" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-scheduler-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-system-resources-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + 
}, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-controller-manager-.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-minikube-kube-public-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-public" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-minikube-kube-public-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-public", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-minikube-kube-node-lease-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-node-lease" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-minikube-kube-node-lease-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-node-lease", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "default" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "certificate-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "bootstrap-signer" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "clusterrole-aggregation-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + 
"attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "root-ca-cert-publisher" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pvc-protection-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "statefulset-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ttl-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "service-account-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "horizontal-pod-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "expand-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "replicaset-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": 
"Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "replication-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "resourcequota-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-17", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "endpoint-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "endpointslice-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "endpointslicemirroring-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ephemeral-volume-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-21", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "node-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-22", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pv-protection-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-23", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "job-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-24", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + 
"resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "daemon-set-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-25", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "deployment-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-26", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "generic-garbage-collector" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-27", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "persistent-volume-binder" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-28", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "storage-provisioner" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-29", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "token-cleaner" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "kube-proxy" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-31", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "namespace-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-32", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cronjob-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-33", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + 
"resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "attachdetach-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-34", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "service-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-35", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "disruption-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-36", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pod-garbage-collector" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-37", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ttl-after-finished-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:kube-scheduler", + "kind": "User" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:kube-controller-manager", + "kind": "User" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:masters", + "kind": "Group" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + 
}, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "coredns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azuredisk-node-win" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "azure-ip-masq-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "cloud-node-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "cloud-node-manager-windows" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-deployments-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "omsagent-rs" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-pods-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "azure-ip-masq-agent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": 
"Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "cloud-node-manager-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "coredns-autoscaler--[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "csi-azuredisk-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "csi-azurefile-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "konnectivity-agent-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "omsagent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-pods-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "omsagent-rs-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-aks-kube-system-services-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-services-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-4", + "policyType": "postureExceptionPolicy", + "actions": 
[ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azuredisk-node" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azurefile-node" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azurefile-node-win" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "kube-proxy" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "omsagent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-daemonsets-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "omsagent-win" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "coredns-autoscaler-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "coredns-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "konnectivity-agent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true 
+ }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "metrics-server-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-replicasets-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "omsagent-rs-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-namespaces-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-system" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "azure-cloud-provider" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cloud-node-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "coredns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "csi-azuredisk-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "csi-azurefile-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-25", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "konnectivity-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-27", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + 
"namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "omsagent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-46", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "default", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-47", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-node-lease", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-48", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-public", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-49", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "azure-ip-masq-agent-config-reconciled" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-50", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "cluster-autoscaler-status" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-51", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "container-azm-ms-aks-k8scluster" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-52", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-53", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": 
"exclude-aks-kube-system-sa-54", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns-custom" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-55", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "extension-apiserver-authentication" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-56", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-57", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "omsagent-rs-config" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-58", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "overlay-upgrade-data" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-59", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "aks-webhook-admission-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-60", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "aks-node-mutating-webhook" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-61", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "aks-node-validating-webhook" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-62", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Group", + "name": "system:masters" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-63", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + 
"resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Group", + "name": "system:nodes" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-64", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "clusterAdmin" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-65", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kube-controller-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-aks-kube-system-sa-66", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kube-scheduler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-gke-kube-system-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-proxy-[A-Za-z0-9-]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "metadata-proxy-v[0-9.]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "node-local-dns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "gke-metrics-agent.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "pdcsi-node-windows" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": 
"kube-system", + "name": "anetd" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "netd" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke-big" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke-small" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke-max" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke.*" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nccl-fastsocket-installer" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "filestore-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "pdcsi-node" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-17", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + 
"resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "ip-masq-agent" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-18", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "anetd-win" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-19", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "gke-metadata-server" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-20", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "gke-metrics-agent-windows" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-22", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-23", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-large" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-24", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-medium" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-25", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "image-package-extractor" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-26", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "namespace": "kube-system", + "name": "image-package-extractor-cleanup" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] 
+ }, + { + "name": "exclude-gke-kube-system-resources-27", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-small" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-29", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-30", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "egress-nat-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-31", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "event-exporter-gke" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-32", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "antrea-controller" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-33", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "antrea-controller-horizontal-autoscaler" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-34", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "kube-dns-autoscaler" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-35", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server-v[0-9.]+" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-36", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": 
"Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent-autoscaler" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-37", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentd-elasticsearch" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-38", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-gke-kube-system-resources-39", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "l7-default-backend" + } + } + ], + "posturePolicies": [ + { + "controlID": "C-.*" + } + ] + }, + { + "name": "exclude-kube-system-service-accounts-38", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "konnectivity-agent-cpha" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-49", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cloud-provider" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-71", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-78", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "kube-dns-autoscaler" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-79", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "netd" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-80", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + 
"systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "metadata-proxy" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-81", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-82", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cilium" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-83", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "node-local-dns" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-84", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "gke-metrics-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-85", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "egress-nat-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-86", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-87", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "event-exporter-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-88", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-cpha" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-89", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + 
"resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "fluentbit-gke" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-90", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pdcsi-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-91", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ip-masq-agent" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-92", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "filestorecsi-node-sa" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-service-accounts-93", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "gke-metadata-server" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-users-and-groups-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "namespace": "kube-system", + "name": "system:vpa-recommender" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-kube-system-users-and-groups-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "namespace": "kube-system", + "name": "system:anet-operator" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:clustermetrics" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:controller:glbc" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:l7-lb-controller" + 
} + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:managed-certificate-controller" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:gke-common-webhooks" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:gcp-controller-manager" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:resource-tracker" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:storageversionmigrator" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-users-and-groups-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kubestore-collector" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-1", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "ca-validate-cfg" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-2", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "flowcontrol-guardrails.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-3", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "validation-webhook.snapshot.storage.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-4", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ 
+ { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "nodelimit.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-5", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "gkepolicy.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-6", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "validation-webhook.snapshot.storage.k8s.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "APIService", + "name": "v1beta1.metrics.k8s.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "pod-ready.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "ca-mutate-cfg" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-10", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "neg-annotation.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-11", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "mutate-scheduler-profile.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-12", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "sasecret-redacter.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-13", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": 
"Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "workload-defaulter.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-14", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "admissionwebhookcontroller.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-15", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "gke-vpa-webhook-config" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-system-resources-16", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "filestorecsi-mutation-webhook.storage.k8s.io" + } + } + ], + "posturePolicies": [ + {} + ] + }, + { + "name": "exclude-service-accounts-7", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kube-controller-manager", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0053" + } + ] + }, + { + "name": "exclude-service-accounts-8", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kube-scheduler", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0053" + } + ] + }, + { + "name": "exclude-service-accounts-9", + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "attributes": { + "systemException": true + }, + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "route-controller", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "controlID": "c-0053" + } + ] + } +] \ No newline at end of file diff --git a/releaseDev/frameworks.json b/releaseDev/frameworks.json new file mode 100644 index 000000000..eb7a81b00 --- /dev/null +++ b/releaseDev/frameworks.json @@ -0,0 +1,11419 @@ +[ + { + "name": "AllControls", + "description": "Contains all the controls from all the frameworks", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Forbidden Container Registries", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial Access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster 
takeover. Attackers may abuse cloud account credentials or the IAM mechanism to gain access to the cluster\u2019s management layer.", + "remediation": "Limit the registries from which you pull container images", + "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", + "test": "Checking image from pod spec; if the registry of the image is in the list of blocked registries, we raise an alert.", + "controlID": "C-0001", + "baseScore": 7.0, + "example": "@controls/examples/c001.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using the \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit the \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions can run malicious commands in containers in the cluster using the exec command (\u201ckubectl exec\u201d).
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Resources memory limit and request", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "This control identifies all Pods for which the memory limit is not set.", + "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0004", + "example": "@controls/examples/c004.yaml", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Data Destruction", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. 
This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or that can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not the dashboard service account is bound to the dashboard role/clusterrole, or if anyone that is not the dashboard pod is associated with the dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster can use its network access to the dashboard pod.
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve the list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users who have get/list/watch RBAC permissions on secrets.", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": "Check that the allowPrivilegeEscalation field in the securityContext of the container is set to false.", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If the container's application needs to write to the filesystem, it is recommended to mount secondary filesystems for the specific directories where the application requires write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context.
An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured readiness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Mount service principal", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. 
Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Defense evasion" + ] + } + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. 
This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply the least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges.", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Malicious admission controller (validating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repetitive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "remediation": "Remove the hostPID and hostIPC privileges from the yaml file(s) unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Malicious admission controller (mutating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster.
This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repetitive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in the AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to the host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to the host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "SSH server running inside container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "An SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if a service connected to some workload has an SSH port (22/2222). If so, we raise an alert.", + "controlID": "C-0042", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they cannot be deployed to the same node. This may prevent the second object from starting, even if Kubernetes will try to reschedule it on another node, provided there are available nodes with a sufficient amount of resources. Also, if the number of replicas of such a workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define an appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc.) that contain a container with hostPort. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails.
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.", + "test": "Check for each workload container whether a hostPort is defined.", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting a host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in the POD spec if there is a hostPath volume; if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities to a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html).
", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Network mapping", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Resources CPU limit and request", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU limit is not set.", + "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0050", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Instance Metadata API", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Discovery", + "Impact - service access" + ] + } + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Access container service account", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
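As an illustrative sketch (not part of this patch; policy and namespace names are hypothetical), a default-deny NetworkPolicy of the kind the network controls expect to find in every namespace:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all   # hypothetical name
  namespace: my-namespace  # hypothetical namespace
spec:
  podSelector: {}          # empty selector applies the policy to every pod in the namespace
  policyTypes:
  - Ingress
  - Egress                 # no ingress/egress rules are listed, so all traffic is denied by default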
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", + "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured liveness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "controlID": "C-0056", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policies, Seccomp, etc., and still remove all unnecessary capabilities. 
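A minimal hardened counterpart to the failing example above (names are hypothetical; this sketch is not part of the patch):
apiVersion: v1
kind: Pod
metadata:
  name: non-privileged   # hypothetical name
spec:
  containers:
  - name: pause
    image: k8s.gcr.io/pause
    securityContext:
      privileged: false               # setting this to true is what triggers C-0057
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]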
Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. 
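An illustrative sketch (hypothetical names, not part of the patch) of a pod given an explicit namespace instead of falling back to the default one:
apiVersion: v1
kind: Namespace
metadata:
  name: payments   # hypothetical namespace
---
apiVersion: v1
kind: Pod
metadata:
  name: api
  namespace: payments   # explicit assignment; omitting this places the pod in the default namespace
spec:
  containers:
  - name: api
    image: nginx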
This control identifies all the PODs running in the default namespace.", + "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", + "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the PODs running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. 
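For illustration (hypothetical role name, not part of the patch), the kind of RBAC grant this control reports, giving a subject the pods/portforward subresource:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: debug-port-forward   # hypothetical name
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods/portforward"]   # this grant is what the control looks for
  verbs: ["create"]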
Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "No impersonation", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
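A minimal audit policy sketch for self-managed clusters (file paths are assumptions, not part of the patch); managed clusters enable audit logging through the cloud provider instead:
# assumed to be referenced by kube-apiserver flags such as
# --audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-log-path=/var/log/kubernetes/audit.log
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata   # record request metadata for every request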
It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
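An illustrative KubeletConfiguration fragment covering both C-0069 and C-0070 (the CA file path is an assumption, not part of the patch):
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                             # equivalent to --anonymous-auth=false (C-0069)
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt   # equivalent to --client-ca-file (C-0070); path is an assumption
authorization:
  mode: Webhook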
This is done through client certificate verification; the operator must configure the Kubelet with a client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification; the operator must configure the Kubelet with a client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Naked PODs", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", + "remediation": "Create the necessary Deployment object for every POD, making every POD a first-class citizen in your IaC architecture.", + "long_description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", + "test": "Test if PODs are not associated with Deployment, ReplicaSet etc. If not, fail.", + "controlID": "C-0073", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Containers mounting Docker socket", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Mounting Docker socket (Unix socket) enables the container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", + "remediation": "Remove docker socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables the container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker, the container has access to Docker internals - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. 
If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. 
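An illustrative Deployment (hypothetical names, not part of the patch) carrying the app.kubernetes.io/ common labels this control checks for:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend   # hypothetical workload
  labels:
    app.kubernetes.io/name: frontend
    app.kubernetes.io/version: "1.4.2"
    app.kubernetes.io/part-of: webshop
    app.kubernetes.io/managed-by: kustomize
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: frontend
  template:
    metadata:
      labels:
        app.kubernetes.io/name: frontend
    spec:
      containers:
      - name: frontend
        image: nginx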
Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0492-cgroups-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", + "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. 
This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", + "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. In the case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", + "controlID": "C-0086", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation", + "Impact - Data access in container" + ] + } + ] + }, + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attackers to gain access to read-only copies of arbitrary files from the host using specially-crafted POD configuration YAMLs", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. 
Users should update to these versions to resolve the issue.", + "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "RBAC enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access", + "Privilege escalation" + ] + } + ] + }, + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "CVE-2022-39328 is a critical vulnerability in Grafana. It might enable attackers to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", + "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", + "controlID": "C-0090", + "baseScore": 9.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno. It enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to 1.8.5 or above", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno. It enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. 
The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", + "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0001", + "C-0002", + "C-0004", + "C-0005", + "C-0007", + "C-0009", + "C-0012", + "C-0013", + "C-0014", + "C-0015", + "C-0016", + "C-0017", + "C-0018", + "C-0020", + "C-0021", + "C-0026", + "C-0030", + "C-0031", + "C-0034", + "C-0035", + "C-0036", + "C-0038", + "C-0039", + "C-0041", + "C-0042", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0049", + "C-0050", + "C-0052", + "C-0053", + "C-0054", + "C-0055", + "C-0056", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0078", + "C-0079", + "C-0081", + "C-0086", + "C-0087", + "C-0088", + "C-0090", + "C-0091", + "C-0262" + ] + }, + { + "name": "MITRE", + "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Data Destruction", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Mount service principal", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
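For context, a minimal CronJob of the kind this control lists for review (hypothetical name and schedule, not part of the patch):
apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report   # hypothetical name
spec:
  schedule: "0 2 * * *"  # every day at 02:00
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: report
            image: busybox
            command: ["sh", "-c", "echo generating report"]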
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Defense evasion" + ] + } + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
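For illustration (hypothetical subject, not part of the patch), the kind of ClusterRoleBinding this control flags:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ops-cluster-admin   # hypothetical name
subjects:
- kind: ServiceAccount
  name: ci-runner           # hypothetical subject; binding it to cluster-admin is what gets reported
  namespace: ci
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin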
", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Malicious admission controller (validating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CoreDNS poisoning", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral Movement" + ], + "controlTypeTags": [ + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", + "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", + "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", + "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", + "controlID": "C-0037", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Malicious admission controller (mutating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "SSH server running inside container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "An SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if a service connected to some workload has an SSH port (22/2222). If so, we raise an alert. ", + "controlID": "C-0042", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting a host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "A hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in POD spec if there is a hostPath volume; if it has the section mount.readOnly == false (or it doesn\u2019t exist), we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting a host directory to the container can be used by attackers to get access to the underlying host. 
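As an informal illustration of the hostPath checks described here, the sketch below scans a local Pod manifest for hostPath volumes and reports any that are mounted without readOnly: true. It assumes PyYAML and a hypothetical single-document pod.yaml path.

```python
# Sketch: flag hostPath volumes in a Pod manifest and report writable mounts
# (readOnly missing or false). Assumes PyYAML and a single-document manifest.
import yaml

with open("pod.yaml") as f:          # hypothetical manifest path
    pod = yaml.safe_load(f)

spec = pod.get("spec", {})
host_paths = {v["name"] for v in spec.get("volumes", []) if "hostPath" in v}

for container in spec.get("containers", []):
    for mount in container.get("volumeMounts", []):
        if mount.get("name") in host_paths and not mount.get("readOnly", False):
            print(f"writable hostPath mount: {container['name']} -> {mount['mountPath']}")
```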
This control identifies all the PODs using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Instance Metadata API", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Discovery", + "Impact - service access" + ] + } + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "Access container service account", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
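When RBAC is enabled, one way to approximate the per-ServiceAccount permission listing this control describes is to enumerate the bindings that reference the ServiceAccount. Below is a minimal sketch under that assumption, using the official `kubernetes` Python client; the ServiceAccount name and namespace are placeholders.

```python
# Sketch: list RoleBindings/ClusterRoleBindings that reference a ServiceAccount.
# Assumes the official `kubernetes` Python client; SA_NS/SA_NAME are examples.
from kubernetes import client, config

config.load_kube_config()
rbac = client.RbacAuthorizationV1Api()
SA_NS, SA_NAME = "default", "default"   # hypothetical ServiceAccount

def refers_to_sa(subjects):
    return any(s.kind == "ServiceAccount" and s.name == SA_NAME and s.namespace == SA_NS
               for s in (subjects or []))

for rb in rbac.list_role_binding_for_all_namespaces().items:
    if refers_to_sa(rb.subjects):
        print(f"RoleBinding {rb.metadata.namespace}/{rb.metadata.name} -> {rb.role_ref.name}")
for crb in rbac.list_cluster_role_binding().items:
    if refers_to_sa(crb.subjects):
        print(f"ClusterRoleBinding {crb.metadata.name} -> {crb.role_ref.name}")
```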
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
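A minimal sketch of how such privileged containers could be surfaced in a running cluster, assuming the official `kubernetes` Python client (it only inspects the containers of running Pods, not workload templates):

```python
# Sketch: flag running containers whose securityContext sets privileged: true.
# Assumes the official `kubernetes` Python client and an active kubeconfig.
from kubernetes import client, config

config.load_kube_config()
core = client.CoreV1Api()

for pod in core.list_pod_for_all_namespaces().items:
    for c in pod.spec.containers:
        sc = c.security_context
        if sc and sc.privileged:
            print(f"privileged: {pod.metadata.namespace}/{pod.metadata.name} ({c.name})")
```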
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes: it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of the events that happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes: it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of the events that happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enables fine-grained authorization of pod creation, and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates, and they extend authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command line and configuration file, looking for the anonymous-auth configuration. If this configuration is set in both, the command-line value takes precedence.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. Operators must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification: the Kubelet must be configured with a client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. Operators must make sure that only the API server is allowed to submit commands to the Kubelet. 
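As a rough sketch of the kubelet-side checks these controls describe (anonymous requests disabled, a client CA file configured), the snippet below reads a kubelet configuration file. The path is the example location used elsewhere in this framework, and command-line flags, when present, take precedence over the file.

```python
# Sketch: inspect a kubelet config file for anonymous-auth and client CA
# settings. The path is an example; flags on the kubelet command line
# override values found here.
import json

CONFIG_PATH = "/etc/kubernetes/kubelet/kubelet-config.json"

with open(CONFIG_PATH) as f:
    cfg = json.load(f)

auth = cfg.get("authentication", {})
if auth.get("anonymous", {}).get("enabled", False):
    print("anonymous requests to the kubelet are enabled")
if not auth.get("x509", {}).get("clientCAFile"):
    print("no client CA file configured for kubelet client TLS authentication")
```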
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0007", + "C-0012", + "C-0014", + "C-0015", + "C-0020", + "C-0021", + "C-0026", + "C-0031", + "C-0035", + "C-0036", + "C-0037", + "C-0039", + "C-0042", + "C-0045", + "C-0048", + "C-0052", + "C-0053", + "C-0054", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070" + ] + }, + { + "name": "cis-aks-t1.2.0", + "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", + "attributes": { + "version": "v1.2.0", + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Master (Control Plane) Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0254" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0175", + "C-0179", + "C-0182", + "C-0173", + "C-0174", + "C-0176", + "C-0177", + "C-0178", + "C-0180", + "C-0183" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0201", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219" + ] + }, + "3": { + "name": "Azure Policy / OPA", + "id": "4.3", + "controlsIDs": [] + }, + "4": { + "name": "CNI Plugin", + "id": "4.4", + "controlsIDs": [ + "C-0206", + "C-0205" + ] + }, + "5": { + "name": "Secrets Management", + "id": "4.5", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "6": { + "name": "Extensible Admission Control", + "id": "4.6", + "controlsIDs": [] + }, + "7": { + "name": "General Policies", + "id": "4.7", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0243", + "C-0250", + "C-0251" + ] + }, + "2": { + "name": "Access and identity options for Azure Kubernetes Service (AKS)", + "id": "5.2", + "controlsIDs": [ + "C-0239", + "C-0241" + ] + }, + "3": { + "name": "Key Management Service (KMS)", + "id": "5.3", + "controlsIDs": [ + "C-0244" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0240", + "C-0245", + "C-0247", + "C-0248", + "C-0252" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0088" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0242", + "C-0249" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + 
"armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Use approved container registries.", + "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", + "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [], + "references": [ + "\n\n \n\n " + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." + }, + { + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access", + "Privilege escalation" + ] + } + ] + }, + "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", + "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", + "controlID": "C-0088", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [], + "references": [ + "\n\n " + ] + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... \"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n 
\n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": {\"webhook\": { \"enabled\": is set to true`.\n\n If the `\"authentication\": {\"mode\": {` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `\"authentication\": {\"mode\": {` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. 
Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. 
x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\nreadOnlyPort to 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. 
It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0182", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", + "references": [ + "\n\n \n\n \n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + 
"description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does 
not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0201", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0205", + "name": "CIS-4.4.1 Ensure latest CNI version is used", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "manual_test": "Ensure CNI plugin supports network policies.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None.", + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "\n\n \n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.5.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "\n\n \n\n \n\n ." + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc.) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. 
A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.7.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n As an alternative, the AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities, this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0216", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully 
check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0217", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0218", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0219", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified, you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on each worker\nnode. 
For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the Azure AKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0239", + "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0240", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0241", + "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", + "remediation": "Set Azure RBAC as access system.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0243", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. 
This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0244", + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0245", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0247", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. 
You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", + "default_value": "By default, Endpoint Private Access is disabled.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0248", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0249", + "name": "CIS-5.6.1 Restrict untrusted workloads", + "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster and presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI with no delay as the cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "long_description": "It is best practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", + "remediation": "", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review" + }, + "baseScore": 5, + "impact_statement": "", + "default_value": "ACI is not a default component of AKS", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0250", + "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS.
Restricting permissions follows the principle of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0251", + "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0252", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack.
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "manual_test": "", + "references": [ + "\n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0254", + "name": "CIS-2.1.1 Enable audit Logs", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "manual_test": "", + "references": [ + "\n\n \n\n " + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. 
All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", + "default_value": "By default, cluster control plane logs aren't sent to be Logged.", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0078", + "C-0088", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0201", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0235", + "C-0238", + "C-0239", + "C-0240", + "C-0241", + "C-0242", + "C-0243", + "C-0244", + "C-0245", + "C-0247", + "C-0248", + "C-0249", + "C-0250", + "C-0251", + "C-0252", + "C-0254" + ] + }, + { + "name": "NSA", + "description": "Implement NSA security advices for K8s ", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using a list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or that can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999.
Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. 
Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. 
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers may be given more privileges than they actually need. 
This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", + "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp, etc., and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in the POD spec if securityContext.privileged == true; if so, raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster.
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. 
They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0005", + "C-0009", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070" + ] + }, + { + "name": "ArmoBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Forbidden Container Registries", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial Access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster\u2019s management layer.", + "remediation": "Limit the registries from which you pull container images from", + "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", + "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", + "controlID": "C-0001", + "baseScore": 7.0, + "example": "@controls/examples/c001.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using a list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Non-root containers", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or that can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999.
Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. 
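A pod spec that would pass the non-root, allowPrivilegeEscalation and read-only filesystem checks above might look like this sketch; the names, image and user/group IDs are illustrative assumptions.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hardened-app                        # illustrative name
spec:
  securityContext:                          # pod-level defaults (non-root containers)
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 3000
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder image
      securityContext:
        allowPrivilegeEscalation: false     # allow privilege escalation disabled
        readOnlyRootFilesystem: true        # immutable container filesystem
      volumeMounts:
        - name: tmp                         # writable scratch space instead of a mutable root fs
          mountPath: /tmp
  volumes:
    - name: tmp
      emptyDir: {}
```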
Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
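For the ingress/egress and service-account-token controls above, a default-deny NetworkPolicy and a ServiceAccount with automount disabled could be sketched as follows; the namespace and object names are illustrative.

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-namespace                   # illustrative namespace
spec:
  podSelector: {}                           # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                              # illustrative name
  namespace: my-namespace
automountServiceAccountToken: false         # pods can re-enable it explicitly if needed
```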
", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. 
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Network mapping", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0049", + "baseScore": 3.0, + "example": "@controls/examples/c049.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. 
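For the insecure-capabilities check, a container that drops everything and re-adds only what it needs might be sketched like this; the re-added capability is only an example, and the name and image are placeholders.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: minimal-caps                        # illustrative name
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder image
      securityContext:
        capabilities:
          drop: ["ALL"]
          add: ["NET_BIND_SERVICE"]         # re-add only what the workload really requires
```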
This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Linux hardening", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", + "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp, etc., and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host.
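A hardened pod along the lines of the Linux hardening control could enable a seccomp profile and an AppArmor profile, roughly as in this sketch; the container name and image are placeholders, and the AppArmor annotation form assumes a cluster that still uses the annotation mechanism.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hardened                            # illustrative name
  annotations:
    container.apparmor.security.beta.kubernetes.io/app: runtime/default   # AppArmor for container "app"
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault                  # default seccomp profile of the container runtime
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder image
```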
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", + "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", + "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
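For the ingress-nginx snippet-annotation issue above, the mitigation maps to a ConfigMap entry along these lines; the ConfigMap name and namespace are assumptions and depend on how ingress-nginx was installed.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller            # assumed name; varies by installation method
  namespace: ingress-nginx                  # assumed namespace
data:
  allow-snippet-annotations: "false"
```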
This control identifies all the PODs running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Sudo in container entrypoint", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that contain the sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that contain the sudo command.", + "test": "Check that there is no 'sudo' in the container entrypoint", + "controlID": "C-0062", + "baseScore": 5.0, + "example": "@controls/examples/c062.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Portforwarding privileges", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "Attackers with the relevant RBAC permission can use the \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit the \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have the relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", + "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", + "controlID": "C-0063", + "baseScore": 5.0, + "example": "@controls/examples/c063.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "No impersonation", + "attributes": { + "armoBuiltin": true, + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes.
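The kind of RBAC grant the port-forwarding control looks for is a rule on the pods/portforward subresource, for example the sketch below; the role name and namespace are illustrative.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: debug-portforward                   # a role like this would be flagged
  namespace: dev                            # illustrative namespace
rules:
  - apiGroups: [""]
    resources: ["pods/portforward"]
    verbs: ["create"]                       # "kubectl port-forward" issues a create on this subresource
```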
However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", + "controlID": "C-0065", + "baseScore": 6.0, + "example": "@controls/examples/c065.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
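For the Secret/ETCD encryption control, a native cluster typically passes an EncryptionConfiguration to the API server via the --encryption-provider-config flag; a minimal sketch, with placeholder key material, could look like this.

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <base64-encoded 32-byte key>   # placeholder, generate your own key
      - identity: {}                        # fallback for reading pre-existing, unencrypted data
```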
It is important to use it so the operator has a record of the events that happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enables fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and extend authorization beyond RBAC. It is important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They expose service port 10250 where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane.
They expose service port 10250 where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. Users should list all the approved repositories in the parameters of this control so that any potentially dangerous image can be identified.", + "test": "Checks if the image is from an allow-listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can allow attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches.
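The two kubelet controls above map to kubelet configuration roughly as in the following sketch; the CA file path is an assumption, and the equivalent command-line flags are --anonymous-auth=false and --client-ca-file.

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                          # disable anonymous access (maps to --anonymous-auth=false)
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt   # assumed path; verifies API server client certs (--client-ca-file)
authorization:
  mode: Webhook
```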
This control alerts on vulnerable kernel versions of Kubernetes nodes.", + "test": "Checking the Linux kernel version of the Node objects; if it is above 5.1 and below 5.16.2 it fires an alert", + "controlID": "C-0079", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9, v2.2.4 or v2.3.0)", + "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", + "test": "Checking the Argo CD version to see if it is a vulnerable version", + "controlID": "C-0081", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-0492-cgroups-container-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside a container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that deploy neither AppArmor nor SELinux, run as root or allow privilege escalation, or have corresponding dangerous capabilities.", + "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or the privilege escalation option and the CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", + "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside a container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have the CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE is not exploitable. Also, the exploit is possible when the container runtime uses the cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers become available, this control will be modified to verify them and avoid false positive detections. Note that it is enough to have a single node in the cluster with a vulnerable Kernel in order to damage the system.
This control identifies all the resources that deploy neither AppArmor nor SELinux, run as root or allow privilege escalation, or have corresponding dangerous capabilities.", + "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. In the case where the container is running with the CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", + "controlID": "C-0086", + "baseScore": 4.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation", + "Impact - Data access in container" + ] + } + ] + }, + "description": "CVE-2022-23648 is a vulnerability in containerd enabling an attacker to gain access to read-only copies of arbitrary files from the host using a specially-crafted POD configuration yaml", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", + "test": "Checking the containerd version to see if it is a vulnerable version (where the container runtime is containerd)", + "controlID": "C-0087", + "baseScore": 7.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patches): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "long_description": "The API server allows an aggregated API to redirect client traffic to any URL.
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", + "controlID": "C-0089", + "baseScore": 3.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to 1.8.5 or above", + "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use to trusted repositories.", + "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", + "controlID": "C-0091", + "baseScore": 8.0, + "example": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0236", + "name": "Verify image signature", + "description": "Verifies the signature of each image with given public keys", + "long_description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "manual_test": "", + "references": [], + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0237", + "name": "Check if signature exists", + "description": "Ensures that all images contain some signature", + "long_description": "Verifies that each image is signed", + "remediation": "Replace the image with a signed image", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0001", + "C-0002", + "C-0005", + "C-0009", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0049", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0078", + "C-0079", +
"C-0081", + "C-0086", + "C-0087", + "C-0089", + "C-0091", + "C-0236", + "C-0237" + ] + }, + { + "name": "cis-v1.23-t1.0.1", + "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", + "attributes": { + "version": "v1.0.1", + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "1": { + "id": "1", + "name": "Control Plane Components", + "subSections": { + "1": { + "id": "1.1", + "name": "Control Plane Node Configuration Files", + "controlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112" + ] + }, + "2": { + "id": "1.2", + "name": "API Server", + "controlsIDs": [ + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + "C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143" + ] + }, + "3": { + "id": "1.3", + "name": "Controller Manager", + "controlsIDs": [ + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150" + ] + }, + "4": { + "id": "1.4", + "name": "Scheduler", + "controlsIDs": [ + "C-0151", + "C-0152" + ] + } + } + }, + "2": { + "name": "etcd", + "id": "2", + "controlsIDs": [ + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159" + ] + }, + "3": { + "name": "Control Plane Configuration", + "id": "3", + "subSections": { + "2": { + "name": "Logging", + "id": "3.2", + "controlsIDs": [ + "C-0160", + "C-0161" + ] + } + } + }, + "4": { + "name": "Worker Nodes", + "id": "4", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "4.1", + "controlsIDs": [ + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171" + ] + }, + "2": { + "name": "Kubelet", + "id": "4.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184" + ] + } + } + }, + "5": { + "name": "Policies", + "id": "5", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "5.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191" + ] + }, + "2": { + "name": "Pod Security Standards", + "id": "5.2", + "controlsIDs": [ + "C-0192", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204" + ] + }, + "3": { + "name": "Network Policies and CNI", + "id": "5.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "5.4", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "7": { + "name": "General Policies", + "id": "5.7", + "controlsIDs": [ + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "controlID": "C-0092", + "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "long_description": "The API server 
pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0093", + "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0094", + "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0095", + "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0096", + "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0097", + "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0098", + "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0099", + "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0100", + "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0101", + "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "NA", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0102", + "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory has permissions of `755`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0103", + "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
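The two etcd data-directory checks here share the same `--data-dir` lookup, so they can be run together. A minimal sketch of the manual tests, assuming the common `/var/lib/etcd` default quoted in the control text; the variable name is hypothetical:

```
# Locate the etcd data directory from the running process; fall back to the usual default.
DATA_DIR=$(ps -ef | grep '[e]tcd' | tr ' ' '\n' | grep -- '--data-dir=' | cut -d= -f2 | head -n 1)
DATA_DIR=${DATA_DIR:-/var/lib/etcd}

# C-0102: permissions should be 700 or more restrictive.
stat -c '%a %n' "$DATA_DIR"

# C-0103: ownership should be etcd:etcd.
stat -c '%U:%G %n' "$DATA_DIR"
```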
This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0104", + "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, admin.conf has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0105", + "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None.", + "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0106", + "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0107", + "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0108", + "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0109", + "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0110", + "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0111", + "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0112", + "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "None", + "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0113", + "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the API server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. 
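The file-permission and ownership controls above all reduce to `stat` calls on a handful of well-known paths. A consolidated sketch of their manual tests, assuming the kubeadm default locations quoted in the control text; the loop and its file list are illustrative, not an exhaustive audit:

```
# Files named in the controls above: expect 600 (or stricter) permissions and root:root ownership.
for f in /etc/kubernetes/manifests/kube-scheduler.yaml \
         /etc/kubernetes/manifests/etcd.yaml \
         /etc/kubernetes/admin.conf \
         /etc/kubernetes/scheduler.conf \
         /etc/kubernetes/controller-manager.conf; do
  [ -e "$f" ] && stat -c '%a %U:%G %n' "$f"
done

# PKI directory ownership plus certificate/key permissions.
ls -laR /etc/kubernetes/pki/
```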
You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0114", + "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", + "description": "Do not use token based authentication.", + "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", + "default_value": "By default, `--token-auth-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0115", + "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag `--disable-admission-plugins` takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", + "default_value": "By default, the `DenyServiceExternalIPs` admission plugin is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0116", + "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "description": "Enable certificate based kubelet authentication.", + "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, certificate-based kubelet authentication is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0117", + "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "description": "Verify kubelet's certificate before establishing connection.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0118", + "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not always authorize all requests.", + "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Only authorized requests will be served.", + "default_value": "By default, `AlwaysAllow` is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0119", + "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, `Node` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0120", + "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", + "description": "Turn on Role Based Access Control.", + "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
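The API server flag checks in this block share one pattern: read the running `kube-apiserver` command line and inspect its arguments. A minimal sketch of that pattern for the anonymous-auth, token-auth-file and authorization-mode checks; the flag selection is illustrative and the variable name is hypothetical:

```
# Capture the kube-apiserver command line once; the bracketed grep avoids matching grep itself.
CMD=$(ps -ef | grep '[k]ube-apiserver' | head -n 1)

# C-0113: anonymous requests should be disabled.
echo "$CMD" | grep -o -- '--anonymous-auth=[^ ]*'

# C-0114: static token authentication should not be configured at all.
echo "$CMD" | grep -q -- '--token-auth-file' && echo 'finding: --token-auth-file is set'

# C-0118 / C-0119: authorization mode must not be AlwaysAllow and should include Node.
echo "$CMD" | grep -o -- '--authorization-mode=[^ ]*'
```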
It is recommended to use the RBAC authorization mode.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", + "default_value": "By default, `RBAC` authorization is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0121", + "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", + "description": "Limit the rate at which the API server accepts requests.", + "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "You need to carefully tune in limits as per your environment.", + "default_value": "By default, `EventRateLimit` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0122", + "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", + "description": "Do not allow all requests.", + "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Only requests explicitly allowed by the admission control plugins would be served.", + "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0123", + "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", + "description": "Always pull images.", + "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", + "default_value": "By default, `AlwaysPullImages` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0124", + "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", + "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", + "default_value": "By default, `SecurityContextDeny` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0125", + "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", + "description": "Automate service accounts management.", + "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not includes `ServiceAccount`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "None.", + "default_value": "By default, `ServiceAccount` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0126", + "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "None", + "default_value": "By default, `NamespaceLifecycle` is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0127", + "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `NodeRestriction` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0128", + "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", + "description": "Do not disable the secure port.", + "long_description": "The secure port is used to serve https with authentication and authorization. 
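The admission-controller checks above all look at the same two flags. A short sketch that prints them so the enabled and disabled plugin lists can be compared against each control; NodeRestriction is used as an illustrative assertion and the variable name is hypothetical:

```
# Print the admission plugin flags of the running kube-apiserver, if present.
CMD=$(ps -ef | grep '[k]ube-apiserver' | head -n 1)
echo "$CMD" | grep -o -- '--enable-admission-plugins=[^ ]*'
echo "$CMD" | grep -o -- '--disable-admission-plugins=[^ ]*'

# Example assertion: NodeRestriction should appear in the enabled list.
echo "$CMD" | grep -o -- '--enable-admission-plugins=[^ ]*' | grep -q 'NodeRestriction' \
  && echo 'NodeRestriction enabled' || echo 'finding: NodeRestriction missing'
```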
If you disable it, no https traffic is served and all traffic is served unencrypted.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "You need to set the API Server up with the right TLS certificates.", + "default_value": "By default, port 6443 is used as the secure port.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0129", + "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0130", + "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0131", + "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "description": "Retain the logs for at least 30 days or as appropriate.", + "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0132", + "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "description": "Retain 10 or an appropriate number of old log files.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. 
For example, if you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0133", + "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, auditing is not enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0134", + "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", + "description": "Set global request timeout for API server requests as appropriate.", + "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--request-timeout` is set to 60 seconds.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0135", + "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", + "description": "Validate service account before validating token.", + "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. This is an example of time of check to time of use security issue.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `--service-account-lookup` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0136", + "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-key-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0137", + "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0138", + "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0139", + "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", + "description": "Setup TLS connection on the API server.", + "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0140", + "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "TLS and client certificate authentication must be configured for etcd.", + "default_value": "By default, `--etcd-cafile` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0141", + "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "description": "Encrypt etcd key-value store.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, `--encryption-provider-config` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0142", + "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. 
Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no encryption provider is set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0143", + "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", + "references": [ + "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0144", + "name": "CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "description": "Activate garbage collector on pod termination, as appropriate.", + "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. 
Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0145", + "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0146", + "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "description": "Use individual service account credentials for each controller.", + "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. 
When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", + "default_value": "By default, `--use-service-account-credentials` is set to false.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0147", + "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "default_value": "By default, `--service-account-private-key-file` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0148", + "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could leave them subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file.\n\n \n```\n--root-ca-file=\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "You need to set up and maintain the root certificate authority file.", + "default_value": "By default, `--root-ca-file` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0149", + "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. 
If your kubelet certificates come from an outside authority/tool (e.g. Vault), then you need to take care of rotation yourself.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0150", + "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "long_description": "The Controller Manager API service, which runs on port 10252/TCP by default, is used for health and metrics information and is available without authentication or encryption. As such, it should only be bound to a localhost interface to minimize the cluster's attack surface.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter.", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1.", + "references": [ + "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0151", + "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", + "description": "Disable profiling, if not needed.", + "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Profiling information would not be available.", + "default_value": "By default, profiling is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0152", + "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", + "references": [ + "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "None", + "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0153", + "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", + "description": "Configure TLS encryption for the etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Client connections only over TLS would be served.", + "default_value": "By default, TLS encryption is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0154", + "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", + "description": "Enable client authentication on etcd service.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", + "default_value": "By default, the etcd service can be queried by unauthenticated clients.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0155", + "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", + "description": "Do not use self-signed certificates for TLS.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. 
You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", + "default_value": "By default, `--auto-tls` is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0156", + "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0157", + "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", + "description": "etcd should be configured for peer authentication.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-client-cert-auth` argument is set to `true`.\n\n **Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0158", + "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0159", + "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", + "references": [ + "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", + "default_value": "By default, no etcd certificate is created and used.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0160", + "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", + "remediation": "Create an audit policy file for your cluster.", + "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` flag is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating overly large volumes of log information, as this could impact the availability of the cluster nodes.", + "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0161", + "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas:\n\n * Access to Secrets managed by the cluster. 
Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", + "references": [ + "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destinations.", + "default_value": "By default, Kubernetes clusters do not log audit information.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0162", + "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kubelet` service file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0163", + "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0164", + "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and that it exists with permissions of `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the proxy file has permissions of `640`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0165", + "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `proxy` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0166", + "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, the `kubelet.conf` file has permissions of `600`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0167", + "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0168", + "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. 
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`:\n\n \n```\nchmod 600 \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0169", + "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, no `--client-ca-file` is specified.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0170", + "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified, you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified, you should set its file ownership to maintain the integrity of the file. The file should be owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /var/lib/kubelet/config.yaml\n\n```", + "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "By default, the `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "By default, anonymous access is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. 
Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "By default, `--client-ca-file` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "By default, `--read-only-port` is set to `10255/TCP`. 
However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "By default, `--protect-kernel-defaults` is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. 
Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", + "default_value": "By default, `--hostname-override` argument is not set.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. 
The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "By default, `--event-qps` argument is set to `5`.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0181", + "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "description": "Setup TLS connection on the Kubelets.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0182", + "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet client certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0183", + "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addresses availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "By default, kubelet server certificate rotation is enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0184", + "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", + "references": [ + "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", + "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. 
Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be 
created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. 
Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0192", + "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. 
To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", + "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", + "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0193", + "name": "CIS-5.2.2 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of privileged containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0194", + "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0195", + "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0196", + "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + 
"remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0197", + "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of conatiners with `.spec.allowPrivilegeEscalation`set to `true`.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0198", + "name": "CIS-5.2.7 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0199", + "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", + "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0200", + "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` from being set to anything other than an empty array.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, there are no restrictions on adding capabilities to containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0201", + "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0202", + "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", + "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. 
As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0203", + "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", + "description": "Do not generally admit containers which make use of `hostPath` volumes.", + "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0204", + "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", + "description": "Do not generally permit containers which require the use of HostPorts.", + "long_description": "Host ports connect containers directly to the host's network. 
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", + "references": [ + "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", + "default_value": "By default, there are no restrictions on the use of HostPorts.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0205", + "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. 
However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.4.2 Consider external secret storage", + "controlID": "C-0208", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "manual_test": "Review your secrets management implementation.", + "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", + "references": [ + "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "default_value": "By default, Kubernetes starts with two initial namespaces: 1. `default` - The default namespace for objects with no other namespace2. `kube-system` - The namespace for objects created by the Kubernetes system3. `kube-node-lease` - Namespace used for node heartbeats4. `kube-public` - Namespace used for public information in a cluster", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. 
An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "manual_test": "Review the pod definitions in your cluster. It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", + "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.7.4 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
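Returning briefly to CIS-5.7.3 above: the control text does not include a sample manifest, so the following is a minimal sketch (not benchmark text) of a pod-level plus container-level security context; the pod name, image and UID are placeholder values.

```
# Hypothetical example: pod-level and container-level security contexts
# combining several commonly recommended settings.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: security-context-demo
spec:
  securityContext:              # pod-level settings
    runAsNonRoot: true
    runAsUser: 10001
    seccompProfile:
      type: RuntimeDefault
  containers:
  - name: app
    image: busybox:1.36
    command: ["sleep", "3600"]
    securityContext:            # container-level settings
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["ALL"]
EOF
```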
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112", + "C-0113", + "C-0114", + "C-0115", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0124", + "C-0125", + "C-0126", + "C-0127", + "C-0128", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0143", + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150", + "C-0151", + "C-0152", + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159", + "C-0160", + "C-0161", + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0192", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + }, + { + "name": "cis-eks-t1.2.0", + "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", + "attributes": { + "version": "v1.2.0", + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "subSections": { + "2": { + "name": "Control Plane Configuration", + "id": "2", + "subSections": { + "1": { + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0067" + ] + } + } + }, + "3": { + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + 
"C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183" + ] + }, + "3": { + "name": "Container Optimized OS", + "id": "3.3", + "controlsIDs": [ + "C-0226" + ] + } + } + }, + "4": { + "name": "Policies", + "id": "4", + "subSections": { + "1": { + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191" + ] + }, + "2": { + "name": "Pod Security Policies", + "id": "4.2", + "controlsIDs": [ + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220" + ] + }, + "3": { + "name": "CNI Plugin", + "id": "4.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "name": "Secrets Management", + "id": "4.4", + "controlsIDs": [ + "C-0207", + "C-0234" + ] + }, + "6": { + "name": "General Policies", + "id": "4.6", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + } + } + }, + "5": { + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0221", + "C-0223", + "C-0078" + ] + }, + "2": { + "name": "Identity and Access Management (IAM)", + "id": "5.2", + "controlsIDs": [ + "C-0225" + ] + }, + "3": { + "name": "AWS EKS Key Management Service", + "id": "5.3", + "controlsIDs": [ + "C-0066" + ] + }, + "4": { + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231" + ] + }, + "5": { + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0232" + ] + }, + "6": { + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0233" + ] + } + } + } + }, + "version": null, + "controls": [ + { + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", + "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", + "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. 
This protects against attackers in the event that they manage to gain access to etcd.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [], + "manual_test": "Using the etcdctl commandline, read that secret out of etcd:\n\n \n```\nETCDCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", + "references": [ + "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" + ], + "impact_statement": "", + "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." + }, + { + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", + "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", + "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. 
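As a hedged shortcut for the audit (not part of the benchmark text), the control plane logging state can also be read with the AWS CLI; `my-cluster` and the region are placeholders:

```
# Hypothetical check: show which control plane log types are enabled.
# "audit" should appear in a clusterLogging entry with "enabled": true.
aws eks describe-cluster \
  --name my-cluster \
  --region us-east-1 \
  --query 'cluster.logging.clusterLogging'
```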
Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Use approved container registries.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", + "test": "Checks if image is from allowed listed registry.", + "controlID": "C-0078", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [], + "references": [ + "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" + ], + "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", + "default_value": "" + }, + { + "controlID": "C-0167", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. 
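Where many worker nodes are involved, the same ownership check can be scripted; the sketch below assumes SSH access, the common EKS kubeconfig path shown above, and placeholder node addresses:

```
# Hypothetical helper: repeat the ownership check across several worker nodes.
# Node addresses and the SSH user are placeholders for your environment.
for node in ip-10-0-1-10 ip-10-0-1-11; do
  echo "== ${node} =="
  ssh ec2-user@"${node}" 'stat -c "%U:%G %n" /var/lib/kubelet/kubeconfig'
done
```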
Verify that the ownership is set to `root:root`.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0171", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0172", + "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", + "description": "Disable anonymous requests to the Kubelet server.", + "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
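One possible way to extract that path non-interactively is sketched below (an assumption-laden helper, not benchmark text; flag layout differs between AMIs):

```
# Hypothetical helper: extract the value passed to --config from the running
# kubelet command line. Handles both "--config <path>" and "--config=<path>".
CONFIG_PATH=$(ps -ef | grep '[k]ubelet' | sed -n 's/.*--config[= ]\([^ ]*\).*/\1/p' | head -n1)
echo "kubelet config file: ${CONFIG_PATH}"
```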
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Anonymous requests will be rejected.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0173", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "description": "Do not allow all requests. Enable explicit authorization.", + "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. 
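Purely as an illustration of what such a drop-in might contain (the file name `20-webhook-auth.conf` and the `KUBELET_EXTRA_ARGS` variable are assumptions; inspect the existing unit on your AMI before changing anything):

```
# Hypothetical sketch of a systemd drop-in that appends the two flags.
# The environment variable your kubelet unit actually reads may differ.
sudo tee /etc/systemd/system/kubelet.service.d/20-webhook-auth.conf <<'EOF'
[Service]
Environment="KUBELET_EXTRA_ARGS=--authentication-token-webhook --authorization-mode=Webhook"
EOF
sudo systemctl daemon-reload
sudo systemctl restart kubelet.service
sudo systemctl status kubelet -l
```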
Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Unauthorized requests will be denied.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0174", + "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", + "description": "Enable Kubelet authentication using certificates.", + "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. 
Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", + "references": [ + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", + "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0175", + "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", + "description": "Disable the read-only port.", + "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
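Assuming the EKS default config path and that `jq` is available on the node, that last check can be shortened to:

```
# Hypothetical check: print readOnlyPort from the kubelet config file.
# A value of 0 means the read-only port is disabled; null means the field is
# absent and the kubelet's built-in default applies.
jq '.readOnlyPort' /etc/kubernetes/kubelet/kubelet-config.json
```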
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0176", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "description": "Do not disable timeouts on streaming connections.", + "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/pull/18552" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Long-lived connections could be interrupted.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0177", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
Ignoring this could potentially lead to running pods with undesired kernel behavior.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", + "default_value": "See the EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0178", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "description": "Allow Kubelet to manage iptables.", + "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. 
It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"makeIPTablesUtilChains.:true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0179", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "description": "Do not override node hostnames.", + "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/issues/22063", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0180", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
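If a `kubectl proxy` is already running on port 8001 as in the audit steps above, the same value can be read from the live configuration (node name is a placeholder; assumes `jq`):

```
# Hypothetical check: read eventRecordQPS from the live kubelet configuration
# via the configz endpoint (node name is a placeholder).
export NODE_NAME=my-node-name
curl -sSL "http://localhost:8001/api/v1/nodes/${NODE_NAME}/proxy/configz" \
  | jq '.kubeletconfig.eventRecordQPS'
```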
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://kubernetes.io/docs/admin/kubelet/", + "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 2, + "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0181", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", + "description": "Enable kubelet client certificate rotation.", + "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--RotateCertificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/41912", + "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", + "https://kubernetes.io/docs/imported/release/notes/", + "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", + "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0183", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "description": "Enable kubelet server certificate rotation.", + "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g.
Vault) then you need to implement rotation yourself.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", + "references": [ + "https://github.com/kubernetes/kubernetes/pull/45059", + "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None", + "default_value": "See the Amazon EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", + "references": [ + "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.3 
Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "remediation": "Where possible, replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "manual_test": "Retrieve the roles defined in each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", + "test": "Check which subjects have wildcard RBAC permissions.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access).\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", + "test": "Check which subjects have RBAC permissions to create pods.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", + "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + 
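As a rough illustration of the wildcard guidance in CIS-4.1.3 above, a narrowly scoped Role might look like the sketch below; the role name, namespace, resources and verbs are illustrative placeholders, not values taken from the benchmark.

```
# Sketch only: a Role that lists explicit resources and verbs instead of
# the "*" wildcard discouraged by CIS-4.1.3 (all names are hypothetical).
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: app-reader        # hypothetical role name
  namespace: my-app       # hypothetical namespace
rules:
- apiGroups: [""]
  resources: ["pods", "configmaps"]
  verbs: ["get", "list", "watch"]
```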
}, + { + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally, ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "default_value": "By default, the `default` service account allows for its service account token to be mounted in pods in its namespace.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "description": "Service account tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "test": "Check that all service accounts and workloads disable automount of service account tokens.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "Pods mounted without service 
account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "default_value": "By default, all pods get a service account token mounted in them.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allows a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "long_description": "The impersonate privilege allows a subject to impersonate other users, gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", + "references": [ + "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", + "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", + "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0205", + "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies, it may not be possible to effectively restrict traffic in the cluster.", + "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such, it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. 
Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", + "references": [ + "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", + "https://aws.github.io/aws-eks-best-practices/network/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None.", + "default_value": "This will depend on the CNI plugin in use.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "description": "Use network policies to isolate traffic in your cluster network.", + "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", + "test": "Check for each namespace if there is a network policy defined.", + "references": [ + "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", + "https://octetz.com/posts/k8s-network-policy-apis", + "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", + "default_value": "By default, network policies are not created.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
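As a sketch of the deny-all starting point recommended in the CIS-4.3.1 remediation above, a default-deny NetworkPolicy for a single namespace could look like the following; the policy name and namespace are illustrative placeholders.

```
# Sketch only: default-deny for both Ingress and Egress in one namespace.
# Specific traffic is then re-allowed with additional, narrower policies.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all   # hypothetical policy name
  namespace: my-app        # hypothetical namespace
spec:
  podSelector: {}          # selects every pod in the namespace
  policyTypes:
  - Ingress
  - Egress
```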
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", + "test": "Check if pods have secrets in their environment variables", + "references": [ + "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", + "default_value": "By default, secrets are not defined", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "description": "Use namespaces to isolate your Kubernetes objects.", + "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "test": "Lists all namespaces in cluster for user to review", + "references": [ + "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "You need to switch between namespaces for administration.", + "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. 
It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields are set according to the recommendations in the CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "name": "CIS-4.6.3 The default namespace should not be used", + "controlID": "C-0212", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "manual_test": "Run this command to list objects in the default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", + "test": "Lists all resources in default namespace for user to review and approve.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 4, + "impact_statement": "None", + "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0213", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", + "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0214", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities, this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostPID is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0215", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to run containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostIPC is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0216", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you need 
to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostNetwork is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0217", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether allowPrivilegeEscalation is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0218", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "description": "Do not generally permit containers to be run as the root user.", + "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Pods with containers which run as the root user will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0219", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined. 
If a PSP is created, 'allowedCapabilities' is set by default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0220", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "description": "Do not generally permit containers with capabilities", + "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases, applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, use of capabilities should be minimized.", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", + "references": [ + "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", + "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", + "default_value": "By default, PodSecurityPolicies are not defined.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0221", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", + "remediation": "To utilize AWS ECR for image scanning, please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console:\n\n 1. Open the Amazon ECR console.\n2. From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. 
On the Images page, select the image to scan and then choose Scan.", + "manual_test": "Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "If you are utilizing AWS ECR The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageErrorYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returnedYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", + "default_value": "Images are not scanned by Default.", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "controlID": "C-0222", + "name": "CIS-5.1.2 Minimize user access to Amazon ECR", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. 
Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0223", + "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", + "references": [ + "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", + "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0225", + "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. 
Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", + "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", + "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0226", + "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", + "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small, secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", + "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like a locked-down firewall are configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", + "remediation": "", + "manual_test": "If a container-optimized OS is required, examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", + "references": [ + "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", + "https://aws.amazon.com/bottlerocket/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. 
ssh) may not be possible, with access and debugging being intended via a management tool.", + "default_value": "A container-optimized OS is not the default.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0227", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", + "default_value": "By default, Endpoint Public Access is disabled.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0228", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "manual_test": "Check for private endpoint access to the Kubernetes API server", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", + "default_value": "By default, the Public Endpoint is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0229", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", + "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8.0, + "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0230", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", + "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", + "remediation": "", + "manual_test": "", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. 
This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", + "default_value": "By default, Network Policy is disabled.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0231", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", + "remediation": "", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5.0, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0232", + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", + "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", + "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 7, + "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not, they will not be able to access the namespace or deploy.", + "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0233", + "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", + "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", + "long_description": "", + "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. 
On the Review and create page, review the information for your Fargate profile and choose Create.", + "manual_test": "", + "references": [ + "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 3, + "impact_statement": "", + "default_value": "By default, AWS Fargate is not utilized.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0234", + "name": "CIS-4.4.2 Consider external secret storage", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "manual_test": "Review your secrets management implementation.", + "references": [], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "None", + "default_value": "By default, no external secret management is configured.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + }, + { + "controlID": "C-0235", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. 
Verify that the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6.0, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0238", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", + "references": [ + "https://kubernetes.io/docs/admin/kube-proxy/" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 6, + "impact_statement": "None.", + "default_value": "See the AWS EKS documentation for the default value.", + "scanningScope": { + "matches": [ + "EKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0242", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. 
For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "long_description": "", + "remediation": "", + "manual_test": "", + "references": [ + "" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "impact_statement": "", + "default_value": "", + "scanningScope": { + "matches": [ + "AKS" + ] + }, + "rules": [] + }, + { + "controlID": "C-0246", + "name": "CIS-4.1.7 Avoid use of system:masters group", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", + "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", + "references": [ + "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", + "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. 
Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0066", + "C-0067", + "C-0078", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0205", + "C-0206", + "C-0207", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0220", + "C-0221", + "C-0222", + "C-0223", + "C-0225", + "C-0226", + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231", + "C-0232", + "C-0233", + "C-0234", + "C-0235", + "C-0238", + "C-0242", + "C-0246" + ] + }, + { + "name": "DevOpsBest", + "description": "", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Resources memory limit and request", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ], + "actionRequired": "configuration" + }, + "description": "This control identifies all Pods for which the memory limit is not set.", + "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0004", + "example": "@controls/examples/c004.yaml", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured readiness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", + "controlID": "C-0018", + "example": "@controls/examples/c018.yaml", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. 
Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Resources CPU limit and request", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "This control identifies all Pods for which the CPU limit is not set.", + "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", + "controlID": "C-0050", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Configured liveness probe", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "long_description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", + "controlID": "C-0056", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Pods in default namespace", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "description": "It is recommended to avoid running PODs in the cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", + "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", + "long_description": "It is recommended to avoid running PODs in the cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the PODs running in the default namespace.", + "test": "Check that there are no pods in the 'default' namespace", + "controlID": "C-0061", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Naked PODs", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", + "remediation": "Create necessary Deployment object for every POD, making any POD a first class citizen in your IaC architecture.", + "long_description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", + "test": "Test if PODs are not associated with Deployment, ReplicaSet etc. If not, fail.", + "controlID": "C-0073", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Containers mounting Docker socket", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Mounting Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", + "remediation": "Remove docker socket mount request or define an exception.", + "long_description": "Mounting Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", + "test": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker, the container has access to Docker internals - fail.", + "controlID": "C-0074", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Image pull policy on latest tag", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. 
This control will identify all PODs with latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", + "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", + "test": "If imagePullPolicy = always pass, else fail.", + "controlID": "C-0075", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", + "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "test": "Test will check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "controlID": "C-0076", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "devops" + ] + }, + "description": "Kubernetes common labels help manage and monitor a Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "long_description": "Kubernetes common labels help manage and monitor a Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. 
Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", + "controlID": "C-0077", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Deprecated Kubernetes image registry", + "attributes": { + "armoBuiltin": true + }, + "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", + "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", + "controlID": "C-0253", + "baseScore": 5.0, + "example": "@controls/examples/c239.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0004", + "C-0018", + "C-0044", + "C-0050", + "C-0056", + "C-0061", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0253" + ] + } +] \ No newline at end of file diff --git a/releaseDev/mitre.json b/releaseDev/mitre.json new file mode 100644 index 000000000..0b5143995 --- /dev/null +++ b/releaseDev/mitre.json @@ -0,0 +1,2616 @@ +{ + "name": "MITRE", + "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" + }, + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Data Destruction", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", + "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", + "controlID": "C-0007", + "baseScore": 5, + "example": "@controls/examples/c007.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-excessive-delete-rights", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can can delete important resources\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n 
canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n" + }, + { + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + 
], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. 
Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := 
sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Values", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + } + ] + }, + { + "name": "Access Kubernetes dashboard", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", + "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", + "controlID": "C-0014", + "baseScore": 2.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-access-dashboard", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to 
dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + }, + { + "name": "rule-access-dashboard-subject-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" + }, + { + "name": "rule-access-dashboard-wl-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "List Kubernetes secrets", + "attributes": { + "armoBuiltin": true, + 
"microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", + "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", + "controlID": "C-0015", + "baseScore": 7.0, + "example": "@controls/examples/c015.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-list-get-secrets", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can list/get secrets\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 
10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in 
api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Mount service principal", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
For example, in AKS each node contains service principal credential.", + "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", + "controlID": "C-0020", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-mount-potential-credentials-paths", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "relevantCloudProviders": [ + "EKS", + "GKE", + "AKS" + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe 
paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" + } + ] + }, + { + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", + "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. 
Needs to add user config", + "controlID": "C-0021", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "exposed-sensitive-interfaces", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.servicesNames" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.servicesNames", + "name": "Service names", + "description": "Kubescape will look for the following services that exposes sensitive interfaces of common K8s projects/applications" + } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\tresult := wl_connectedto_service(wl, service)\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tresult := wl_connectedto_service(pod, service)\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod, service]\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n\tresult := 
wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n" + }, + { + "name": "exposed-sensitive-interfaces-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.sensitiveInterfaces" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveInterfaces", + "name": "Sensitive interfaces", + "description": "The following interfaces were seen exploited. Kubescape checks if they are externally exposed." 
+ } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == 
wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" + } + ] + }, + { + "name": "Kubernetes CronJob", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", + "test": "We list all CronJobs that exist in cluster for the user to approve.", + "controlID": "C-0026", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "rule-deny-cronjobs", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", + "armoBuiltin": true + }, + "ruleLanguage": "rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if it's cronjob", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Delete Kubernetes events", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Defense evasion" + ] + } + ] + }, + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events --all\u201d) in an attempt to avoid detection of their activity in the cluster.", + "test": "List who has delete/deletecollection RBAC permissions on events.", + "controlID": "C-0031", + "baseScore": 4.0, + "example": "@controls/examples/c031.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-delete-k8s-events", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := 
sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}" + }, + { + "name": "rule-can-delete-k8s-events-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ 
groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in 
rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := 
[sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Malicious admission controller (validating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0036", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Malicious admission controller", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "CoreDNS poisoning", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral Movement" + ], + "controlTypeTags": [ + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", + "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", + "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. 
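To make the RBAC pattern behind the CoreDNS control concrete, here is a hypothetical Role and RoleBinding of the kind the 'coredns' rules below would flag: update/patch permissions on the coredns ConfigMap in kube-system. All subject and object names are illustrative assumptions.

# Hypothetical RBAC objects the 'coredns' configmap rules would flag.
# Only the verbs/resources/resourceNames matter; names are assumed.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: coredns-editor              # assumed name
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["coredns"]
    verbs: ["update", "patch"]      # update, patch or "*" is what the control looks for
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: coredns-editor-binding      # assumed name
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: coredns-editor
subjects:
  - kind: ServiceAccount
    name: example-sa                # assumed subject
    namespace: kube-system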
If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", + "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", + "controlID": "C-0037", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-update-configmap", + "attributes": { + "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding", + "ConfigMap" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can update/patch the 'coredns' configmap", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n#RoleBinding to Role\ndeny [msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# RoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n cautils.list_contains(rule.resources,\"configmaps\")\n }\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n }\n\n canModifyConfigMapResource(rule) {\n cautils.list_contains(rule.resources,\"configmaps\")\n cautils.list_contains(rule.resourceNames,\"coredns\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"update\")\n }\n\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"patch\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n }\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "rule-can-update-configmap-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can update/patch the 'coredns' configmap", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", 
\"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Malicious admission controller (mutating)", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. 
This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "controlID": "C-0039", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "list-all-mutating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Malicious admission controller", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns mutating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "SSH server running inside container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", + "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. 
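As an illustration of what the SSH rules below look for, consider the hypothetical Service sketched here: it selects a workload's labels and exposes port 22. Port 2222, or a matching targetPort, would trigger the same alert. The names and labels are assumptions.

# Hypothetical Service that the SSH-detection rules would flag.
apiVersion: v1
kind: Service
metadata:
  name: debug-ssh                 # assumed name
  namespace: default
spec:
  selector:
    app: my-workload              # must match the workload's labels for the rule to fire
  ports:
    - name: ssh
      port: 22                    # 22 or 2222 (as port or targetPort) raises the alert
      targetPort: 22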
", + "controlID": "C-0042", + "baseScore": 3.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-can-ssh-to-pod", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n" + }, + { + "name": "rule-can-ssh-to-pod-v1", + "attributes": { + 
"microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := 
service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" + } + ] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying 
host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
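The following is a sketch of the writable hostPath pattern that the rule above (C-0045) flags, with the suggested fix noted as a comment. The Pod name, image and paths are assumptions for illustration.

# Hypothetical Pod showing the writable hostPath pattern flagged by C-0045.
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo              # assumed name
spec:
  containers:
    - name: app
      image: nginx                 # assumed image
      volumeMounts:
        - name: host-dir
          mountPath: /host-data
          # readOnly omitted (or set to false) -> the rule raises an alert;
          # adding "readOnly: true" here is the fix the rule suggests.
  volumes:
    - name: host-dir
      hostPath:
        path: /var/log             # assumed host path; any writable hostPath mount is flagged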
This control identifies all the PODs using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n 
startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Instance Metadata API", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Discovery", + "Impact - service access" + ] + } + ] + }, + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", + "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", + "controlID": "C-0052", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "instance-metadata-api-access", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "cloudProviderInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" + } + ] + }, + { + "name": "Access container service account", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", + "remediation": "Verify that RBAC is enabled. 
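For the service-account control introduced above, one way to keep the token out of pods that do not need it is to disable automounting, which the rules below take into account. The sketch uses assumed names.

# Hypothetical ServiceAccount/Pod pair that avoids mounting the SA token.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: no-token-sa                        # assumed name
automountServiceAccountToken: false        # default for pods using this ServiceAccount
---
apiVersion: v1
kind: Pod
metadata:
  name: worker                             # assumed name
spec:
  serviceAccountName: no-token-sa
  automountServiceAccountToken: false      # per-Pod setting; overrides the ServiceAccount default
  containers:
    - name: app
      image: busybox                       # assumed image
      command: ["sleep", "3600"]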
Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", + "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", + "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", + "controlID": "C-0053", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "access-container-service-account", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := 
serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := 
serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each 
Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n" + }, + { + "name": "access-container-service-account-v1", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n 
subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
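To illustrate the remediation for this control, a minimal sketch of a default-deny NetworkPolicy; the internal-networking rule in this control only checks that at least one NetworkPolicy exists in each namespace, and the namespace name here is a placeholder:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-namespace        # placeholder namespace
spec:
  podSelector: {}                # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress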
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
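As a sketch of the remediation just described, a Pod whose container explicitly disables privileged mode and drops capabilities; the Pod name and image are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: hardened-pod                        # placeholder name
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder image
      securityContext:
        privileged: false                   # the field this control checks
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL                           # adding SYS_ADMIN here would also be flagged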
Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN 
capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
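For illustration, a sketch of the kind of manifest this rule flags when the node runs a vulnerable kubelet, namely a volumeMounts entry that sets subPath; the Pod name and image are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo                        # placeholder name
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0   # placeholder image
      volumeMounts:
        - name: data
          mountPath: /data
          subPath: logs                     # the field the rule looks for
  volumes:
    - name: data
      emptyDir: {}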
", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to 
CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
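For a self-managed cluster, a minimal sketch of an EncryptionConfiguration that encrypts Secrets at rest; the etcd-encryption-native rule in this control checks that the API server is started with --encryption-provider-config, and the key material below is a placeholder:

apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: "<base64-encoded 32-byte key>"   # placeholder key
      - identity: {}                                   # fallback so existing plaintext data stays readable

The file is then referenced on the kube-apiserver command line, for example --encryption-provider-config=/etc/kubernetes/enc/enc.yaml (the path is a placeholder).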
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
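For a self-managed cluster, a sketch of what the k8s-audit-logs-enabled-native rule in this control expects: the API server started with an --audit-policy-file flag pointing at an audit Policy such as the one below; the rule level and file paths are placeholders:

apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  - level: Metadata        # placeholder: log request metadata for all requests

# Referenced on the kube-apiserver command line, for example:
#   --audit-policy-file=/etc/kubernetes/audit-policy.yaml
#   --audit-log-path=/var/log/kubernetes/audit.log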
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enables fine-grained authorization of pod creation, and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates, and they extend authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0007", + "C-0012", + "C-0014", + "C-0015", + "C-0020", + "C-0021", + "C-0026", + "C-0031", + "C-0035", + "C-0036", + "C-0037", + "C-0039", + "C-0042", + "C-0045", + "C-0048", + "C-0052", + "C-0053", + "C-0054", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070" + ] +} \ No newline at end of file diff --git a/releaseDev/nsa.json b/releaseDev/nsa.json new file mode 100644 index 000000000..821100839 --- /dev/null +++ b/releaseDev/nsa.json @@ -0,0 +1,2145 @@ +{ + "name": "NSA", + "description": "Implement NSA security advice for K8s ", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "compliance" + ], + "version": null, + "controls": [ + { + "name": "Exec into container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using the \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit the \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "long_description": "Attackers who have permissions can run malicious commands in containers in the cluster using the exec command (\u201ckubectl exec\u201d). 
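As an illustration of what this control reports, a sketch of a namespaced Role granting the create verb on pods/exec (the exec-into-container rules match create or * verbs on pods/exec, pods/* or *); the Role name and namespace are placeholders:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-exec-role        # placeholder name
  namespace: default         # placeholder namespace
rules:
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create"]

Any subject bound to such a Role through a RoleBinding would be listed by this control.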
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", + "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", + "controlID": "C-0002", + "baseScore": 5.0, + "example": "@controls/examples/c002.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exec-into-container", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" + }, + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "API server insecure port is enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", + "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", + "controlID": "C-0005", + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "insecure-port-flag", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", 
[container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + } + ] + }, + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resource-policies", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace has no resource policies defined", + "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := 
wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" + } + ] + }, + { + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Credential access" + ] + }, + { + "attackTrack": "container", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Attackers who have access to 
configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", + "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", + "controlID": "C-0012", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := 
data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Values", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n 
configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + } + ] + }, + { + "name": "Non-root containers", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or that can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", + "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", + "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", + "controlID": "C-0013", + "baseScore": 6.0, + "example": "@controls/examples/c013.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "non-root-containers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id greater than 999, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + } + ] + }, + { + "name": "Allow privilege escalation", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
", + "controlID": "C-0016", + "baseScore": 6.0, + "example": "@controls/examples/allowprivilegeescalation.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-allow-privilege-escalation", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not 
container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + } + ] + }, + { + "name": "Ingress and Egress blocked", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "compliance" + ] + }, + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", + "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", + "controlID": "C-0030", + "baseScore": 6.0, + "example": "@controls/examples/c030.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ingress-and-egress-blocked", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, 
networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" + } + ] + }, + { + "name": "Automatic mapping of service account", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Credential access", + "Impact - K8s API access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "long_description": "We have it in Armo best (Automatic mapping of service account token).", + "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", + "controlID": "C-0034", + "baseScore": 6.0, + "example": "@controls/examples/c034.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "automount-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) 
{\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Cluster-admin binding", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - data destruction", + "Impact - service injection" + ] + } + ] + }, + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", + "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", + "controlID": "C-0035", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-list-all-cluster-admins", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n 
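A short sketch of the remediation described for C-0034, showing both places the rule's fix path points to; the ServiceAccount, pod and image names are illustrative, and per the control text the pod-level field takes precedence:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                         # illustrative name
  namespace: default
automountServiceAccountToken: false    # ServiceAccount-level opt-out
---
apiVersion: v1
kind: Pod
metadata:
  name: app-pod                        # illustrative name
  namespace: default
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false  # pod-level field; takes precedence over the ServiceAccount
  containers:
    - name: app
      image: nginx                     # illustrative image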
}\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := 
sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } + ] + }, + { + "name": "Host PID/IPC privileges", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "controlID": "C-0038", + "baseScore": 7.0, + "example": "@controls/examples/c038.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-pid-ipc-privileges", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. 
Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
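As a sketch of the remediation for C-0038, the two pod-level flags the rule inspects are simply left at their defaults or set explicitly to false (pod and image names are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: isolated-pod     # illustrative name
spec:
  hostPID: false         # default; the field may also simply be omitted
  hostIPC: false         # default; the field may also simply be omitted
  containers:
    - name: app
      image: nginx       # illustrative image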
[wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n 
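For C-0041 the same idea applies to hostNetwork; a sketch using a DaemonSet, since the rule also checks the spec.template.spec path of workloads (names and image are illustrative):

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-agent               # illustrative name
spec:
  selector:
    matchLabels:
      app: node-agent
  template:
    metadata:
      labels:
        app: node-agent
    spec:
      hostNetwork: false         # reported at spec.template.spec.hostNetwork when true; false or omitted passes
      containers:
        - name: agent
          image: busybox         # illustrative image
          command: ["sleep", "3600"]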
path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." 
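A sketch of the remediation for C-0044: expose the container through a Service (NodePort or ClusterIP) instead of a hostPort, which is the field the rule reports when it is set (names, ports and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: web
  labels:
    app: web
spec:
  containers:
    - name: web
      image: nginx               # illustrative image
      ports:
        - containerPort: 8080    # no hostPort entry here
---
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  type: NodePort                 # or ClusterIP, per the remediation text
  selector:
    app: web
  ports:
    - port: 80
      targetPort: 8080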
+ } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "Cluster internal networking", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement" + ] + } + ] + }, + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
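C-0046's rule checks securityContext.capabilities.add against the configurable insecureCapabilities list. A minimal sketch of the usual remediation pattern, dropping everything and adding back only what the workload needs (the added capability is an illustrative assumption, not taken from the control's configuration):

apiVersion: v1
kind: Pod
metadata:
  name: least-caps                    # illustrative name
spec:
  containers:
    - name: app
      image: nginx                    # illustrative image
      securityContext:
        capabilities:
          drop: ["ALL"]
          add: ["NET_BIND_SERVICE"]   # any entry that appears in the configured insecureCapabilities list would still be flagged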
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", + "test": "Check for each namespace if there is a network policy defined.", + "controlID": "C-0054", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Linux hardening", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", + "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
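A sketch of the remediation for C-0054: the rule only checks that each namespace has at least one NetworkPolicy, and a default-deny policy is a common starting point (namespace and policy names are illustrative):

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all      # illustrative name
  namespace: team-a           # illustrative namespace
spec:
  podSelector: {}             # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress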
If none of these fields are defined for both the container and pod, alert.", + "controlID": "C-0055", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "linux-hardening", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot 
obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + } + ] + }, + { + "name": "Privileged container", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", + "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", + "controlID": "C-0057", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := 
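Referring back to C-0055 (linux-hardening), whose rule completes just above: it looks for seccompProfile, seLinuxOptions, an AppArmor annotation or capabilities.drop, and defining any of these broadly avoids the alert. A minimal sketch (names and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: hardened
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault      # pod-level seccomp, one of the properties the rule accepts
  containers:
    - name: app
      image: nginx              # illustrative image
      securityContext:
        capabilities:
          drop: ["ALL"]         # container-level capabilities.drop also satisfies the check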
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + } + ] + }, + { + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
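Note that C-0057's rule above also reports a container that adds SYS_ADMIN even when privileged is not set. A hypothetical spec that would be flagged on that path alone (names and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: sysadmin-cap
spec:
  containers:
    - name: app
      image: nginx              # illustrative image
      securityContext:
        # privileged is not set, but the SYS_ADMIN entry below is still reported
        capabilities:
          add: ["SYS_ADMIN"]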
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "controlID": "C-0058", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + } + ] + }, + { + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + 
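For C-0058, the rule above reports volumeMounts entries that use subPath on nodes running a vulnerable kubelet. A hypothetical mount of the kind that would be flagged, and that the remediation says to remove (names and paths are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: subpath-example
spec:
  containers:
    - name: app
      image: nginx              # illustrative image
      volumeMounts:
        - name: config
          mountPath: /etc/app
          subPath: app.conf     # reported at ...volumeMounts[j].subPath when the node version is vulnerable
  volumes:
    - name: config
      configMap:
        name: app-config        # illustrative ConfigMap name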
"compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access", + "Execution" + ] + } + ] + }, + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", + "controlID": "C-0059", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + } + ] + }, + { + "name": "Secret/ETCD encryption enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Impact" + ] + } + ] + }, + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", + "controlID": "C-0066", + "baseScore": 6.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "secret-etcd-encryption-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + } + ] + }, + { + "name": "Audit logs enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Defense evasion - KubeAPI" + ] + } + ] + }, + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", + "controlID": "C-0067", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "PSP enabled", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Impact - service injection" + ] + } + ] + }, + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", + "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", + "controlID": "C-0068", + "baseScore": 1.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "psp-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + } + ] + }, + { + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "kubeapi", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", + "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", + "controlID": "C-0069", + "baseScore": 10.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + }, + { + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "node", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", + "controlID": "C-0070", + "baseScore": 9.0, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "rules": [ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0002", + "C-0005", + "C-0009", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070" + ] +} \ No newline at end of file diff --git a/releaseDev/rules.json b/releaseDev/rules.json new file mode 100644 index 000000000..ca7448d03 --- /dev/null +++ b/releaseDev/rules.json @@ -0,0 +1,8953 @@ +[ + { + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Use individual service account credentials for each controller.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. 
When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "CVE-2022-23648", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You 
are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" + }, + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Turn on Role Based Access Control.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": 
[],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "resources-other1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ControllerRevision" + ] + }, + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + }, + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + }, + { + "apiGroups": [ + "discovery.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "EndpointSlice" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Set global request timeout for API server requests as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 
'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" + }, + { + "name": "pods-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-clusters-are-created-with-private-nodes", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the rate at which the API server accepts requests.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "alert-mount-potential-credentials-paths", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "relevantCloudProviders": [ + "EKS", + "GKE", + "AKS" + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# 
get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" + }, + { + "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain the logs for at least 30 days or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, 
\"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + }, + { + "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = 
sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "insecure-port-flag", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" + }, + { + "name": "etcd-peer-auto-tls-disabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default 
Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 
:= array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privileged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n    msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n        \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n    }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n    msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n        \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n    }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n    msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "exposed-rce-pods", + "attributes": { + "m$K8sThreatMatrix": "exposed-rce-pods", + "armoBuiltin": true, + "useFromKubescapeVersion": "v2.0.150", + "imageScanRelated": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service", + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "armo.vuln.images", + "image.vulnscan.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ImageVulnerabilities" + ] + } + ], + "description": "fails if known pods have exposed services and known vulnerabilities with remote code execution", + "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # At least one rce vulnerability\n filter_rce_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_rce_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.categories.isRce == true\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", + "resourceEnumerator": "package armo_builtins\n \ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ; x.apiVersion == \"v1\"]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ; x.apiVersion == \"v1\"]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"] # TODO: x.apiVersion == \"--input--\" || x.apiVersion == \"--input--\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n 
service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}" + }, + { + "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "rule-access-dashboard-subject-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not 
dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n    path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" + }, + { + "name": "psp-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n    cmd := apiserverpod.spec.containers[0].command[j]\n    contains(cmd, \"--enable-admission-plugins=\")\n    output := split(cmd, \"=\")\n    not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" + }, + { + "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" + }, + { + "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "Enable Azure Defender image scanning. 
Command: az aks update --enable-defender --resource-group --name ", + "ruleQuery": "armo_builtin", + "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" + }, + { + "name": "pod-security-admission-restricted-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "cluster-admin-role", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + 
"apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\n    is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n    obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n    msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" + }, + { + "name": "automount-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n    service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n    service_account := service_accounts[_]\n    result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n    fixed_path := get_fixed_path(result)\n\n    msga := {\n\t    \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n    pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n    fixed_path := get_fixed_path(result)\n\n    msga := {\n\t    \"alertMessage\": sprintf(\"Pod: %v in the 
following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod 
spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Retain 10 or an appropriate number of old log files.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": 
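The automount-service-account rule above denies ServiceAccounts (and workloads) whose token mounting is left at the Kubernetes default. A minimal, standalone sketch of the ServiceAccount branch follows; the package name and the simplified list-shaped input are illustrative and are not part of this patch.

```
package automount_example

# A ServiceAccount that does not explicitly set
# automountServiceAccountToken: false is flagged (unset and true both fail).
deny[name] {
	sa := input[_]
	sa.kind == "ServiceAccount"
	not sa.automountServiceAccountToken == false
	name := sa.metadata.name
}

# Example input (a list of objects, as the packaged rules also assume):
#   [{"kind": "ServiceAccount", "metadata": {"name": "default", "namespace": "demo"}}]
# Here deny evaluates to {"default"}, because the token would be auto-mounted by default.
```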
[path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + }, + { + "name": "has-image-signature", + "attributes": { + "armoBuiltin": true, + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := 
apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-can-list-get-secrets", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can 
list/get secrets\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "etcd-client-auth-cert", + "attributes": { 
+ "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Enable client authentication on etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Malicious admission controller", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" + }, + 
{ + "name": "etcd-peer-tls-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "rule-list-all-cluster-admins", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + 
"packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "rule-can-delete-k8s-events", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : 
[subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}" + }, + { + "name": "ensure-network-policy-is-enabled-eks", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := 
{\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" + }, + { + "name": "kubelet-rotate-certificates", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --rotate-certificates argument is not set to false.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", + 
"attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Automate service accounts management.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "resource-policies", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + 
"resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace has no resource policies defined", + "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu 
limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" + }, + { + "name": "kubelet-ip-tables", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "workload-mounted-secrets", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Secret" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == 
volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "set-systctls-params", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.sysctls is not set.", + "remediation": "Set securityContext.sysctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has sysctls set\n not pod.spec.securityContext.sysctls\n\n path := \"spec.securityContext.sysctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.sysctls'\", [pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookies\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has sysctls set\n not wl.spec.template.spec.securityContext.sysctls\n\n path := \"spec.template.spec.securityContext.sysctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.sysctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookies\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has sysctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.sysctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookies\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := 
sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" + }, + { + "name": "etcd-unique-ca", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", + "ruleQuery": 
"armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := regex.split(\"=\", command)\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" + }, + { + "name": "rule-deny-cronjobs", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", + "armoBuiltin": true + }, + "ruleLanguage": "rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if it's cronjob", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" + }, + { + "name": "etcd-auto-tls-disabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Do not use self-signed certificates for TLS.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = 
input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "access-container-service-account", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns 
for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, 
wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", 
[wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n" + }, + { + "name": "anonymous-access-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in case anonymous access is enabled on the cluster", + "remediation": "Disable anonymous access by passing the --anonymous-auth=false flag to the kube-apiserver component, or if it's a managed cluster, you can remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", 
+ "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n\n isAnonymous(rolebinding)\n\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:anonymous\"\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:unauthenticated\"\n}\n" + }, + { + "name": "ensure-aws-policies-are-present", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "fails if aws policies are not found", + "remediation": "Implement policies to minimize user access to Amazon ECR", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n" + }, + { + "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable certificate based kubelet authentication.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "automount-default-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot 
service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" + }, + { + "name": "alert-container-optimized-os-not-in-use", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. \n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" + }, + { + "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "set-fsgroup-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": 
sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroupChangePolicy is not set to an allowed value.", + "remediation": "Set securityContext.fsGroupChangePolicy value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not 
fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(\"Always|OnRootMismatch\", securityContext.fsGroupChangePolicy)\n} else := false\n\n" + }, + { + "name": "exec-into-container", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; 
rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" + }, + { + "name": "psp-deny-privileged-container", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[psp]},\n\t}\n}\n" + }, + { + "name": "psp-deny-hostnetwork", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "drop-capability-netraw", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "secret-etcd-encryption-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot 
isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" + }, + { + "name": "kubelet-protect-kernel-defaults", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if the --protect-kernel-defaults argument is set to true.", + "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", + "ruleQuery": "", + "rule": "package 
armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "rule-excessive-delete-rights", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can can delete important resources\n#RoleBinding 
to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, 
\"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n" + }, + { + "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "resources-secret-in-default-namespace", + "attributes": { + 
"armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "rule-can-bind-escalate", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can or bind escalate roles/clusterroles", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "validate-kubelet-tls-configuration-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletConfiguration", + "KubeletCommandLine" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", + "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, 
\"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t#get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "resources-rbac-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + 
"rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verify kubelet's certificate before establishing connection.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 
0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rbac-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny addmission controller is not enabled. 
This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "kubelet-rotate-kubelet-server-certificate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" + }, + { + "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not always authorize all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-hostile-multitenant-workloads", + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": [], + "configInputs": [], + "controlConfigInputs": [], + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. 
The security domain for Kubernetes becomes the entire cluster, not an individual node.", + "remediation": "Use physically isolated clusters", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" + }, + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be 
provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "exposed-critical-pods", + "attributes": { + "m$K8sThreatMatrix": "exposed-critical-pods", + "armoBuiltin": true, + "imageScanRelated": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service", + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "armo.vuln.images", + "image.vulnscan.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ImageVulnerabilities" + ] + } + ], + "description": "Fails if pods have exposed services as well as critical vulnerabilities", + "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n \n container.image == vuln.metadata.name\n\n # At least one critical vulnerabilities\n filter_critical_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_critical_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.severity == \"Critical\"\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = 
pod.metadata.labels[_]; x == service_selectors[_] ])\n}" + }, + { + "name": "configured-liveness-probe", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Liveness probe is not configured", + "remediation": "Ensure Liveness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your 
system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "list-all-namespaces", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := 
data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + }, + { + "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", 
\"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "excessive_amount_of_vulnerabilities_pods", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed critical vulnerable pods", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133", + "imageScanRelated": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "armo.vuln.images", + "image.vulnscan.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ImageVulnerabilities" + ] + } + ], + "configInputs": [ + "settings.postureControlInputs.max_critical_vulnerabilities", + "settings.postureControlInputs.max_high_vulnerabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.max_critical_vulnerabilities", + "name": "Max critical vulnerabilities", + "description": "Maximum amount of allowed critical risk vulnerabilities" + }, + { + "path": "settings.postureControlInputs.max_high_vulnerabilities", + "name": "Max high vulnerabilities", + "description": "Maximum amount of allowed high risk vulnerabilities" + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if pods are running container images with an excessive amount of critical or high severity vulnerabilities", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # get 
container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # Has ^ amount of vulnerabilities\n check_num_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"Critical\" ])\n\n str_max := data.postureControlInputs.max_critical_vulnerabilities[_]\n exists > to_number(str_max)\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"High\" ])\n\n str_max := data.postureControlInputs.max_high_vulnerabilities[_]\n exists > to_number(str_max)\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n \n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}" + }, + { + "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-access-dashboard", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 
9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + }, + { + "name": "rule-can-portforward", + "attributes": { + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": 
[path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"get\")\n}\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/portforward\")\n}\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/*\")\n}\ncanForwardToPodResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" + }, + { + "name": "K8s common labels usage", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.k8sRecommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.k8sRecommendedLabels", + "name": "Kubernetes Recommended Labels", + "description": "Kubescape checks that workloads have at least one of the following kubernetes recommended labels." 
+ } + ], + "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := 
recommended_labels[_]\n\tlabels[recommended_label]\n}\n" + }, + { + "name": "label-usage-for-resources", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.recommendedLabels" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.recommendedLabels", + "name": "Recommended Labels", + "description": "Kubescape checks that workloads have at least one of the following labels." + } + ], + "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n \n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := 
no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n" + }, + { + "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + }, + { + "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "workload-mounted-pvc", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts PVC", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result 
{\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-service-principle-has-read-only-permissions", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + }, + { + "name": "pod-security-admission-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of 
the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "set-seLinuxOptions", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does 
not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "rule-identify-blocklisted-image-registries", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.publicRegistries", + "settings.postureControlInputs.untrustedRegistries" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of the following registries are in use." 
+ } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}" + }, + { + "name": "etcd-tls-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Configure TLS encryption for the etcd service.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = 
input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "CVE-2022-3172", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apiregistration.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "APIService" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "apiserverinfo.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", + "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" + }, + { + "name": "ingress-and-egress-blocked", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + 
"apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= 
networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 
0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" + }, + { + "name": "ensure-external-secrets-storage-is-in-use", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" + }, + { + "name": "psp-required-drop-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + 
"description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return al the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" + }, + { + "name": "rule-can-update-configmap", + "attributes": { + "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding", + "ConfigMap" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can update/patch the 'coredns' configmap", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n#RoleBinding to Role\ndeny [msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# RoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n 
canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n cautils.list_contains(rule.resources,\"configmaps\")\n }\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n }\n\n canModifyConfigMapResource(rule) {\n cautils.list_contains(rule.resources,\"configmaps\")\n cautils.list_contains(rule.resourceNames,\"coredns\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"update\")\n }\n\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"patch\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n }\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "psp-deny-hostipc", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == 
\"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == 
\"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "insecure-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." + } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | 
capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + }, + { + "name": "resources-other2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress", + "NetworkPolicy" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + }, + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "image-pull-policy-is-not-set-to-always", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", 
[format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" + }, + { + "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot 
cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "ensure-azure-rbac-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", + "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. 
Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled checks if Azure RBAC is enabled in the ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" + }, + { + "name": "instance-metadata-api-access", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "cloudProviderInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Checks if there is access from the nodes to cloud providers' instance metadata services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" + }, + { + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, 
api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", 
[cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not allow all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "set-procmount-default", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": 
[ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) 
{\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" + }, + { + "name": "rule-can-list-get-secrets-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "restrict-access-to-the-control-plane-endpoint", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "false", + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", 
+ "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" + }, + { + "name": "k8s-audit-logs-enabled-native-cis", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Do not allow all requests. 
Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location 
on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "psp-deny-allowed-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Enable 
kubelet server certificate rotation on controller-manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "alert-fargate-not-in-use", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if it's name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "relevantCloudProviders": [ + "EKS" + ], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := 
\"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := \"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := \tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" + }, + { + "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", 
[count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + }, + { + "name": "naked-pods", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", + "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. Example command: kubectl create deployment nginx-depl --image=nginx:1.19", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" + }, + { + "name": "sudo-in-container-entrypoint", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, beggining_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" + }, + { + "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "kubelet-event-qps", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "set-seccomp-profile-RuntimeDefault", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not 
define seccompProfile as RuntimeDefault", + "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != 
\"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" + }, + { + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveValues", + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Values", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + }, + { + "name": "CVE-2022-39328", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := 
deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + }, + { + "name": "ensure-endpointprivateaccess-is-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" + }, + { + "name": "list-all-mutating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Malicious admission controller", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Returns mutating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious behavior", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == 
\"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" + }, + { + "name": "namespace-without-service-account", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not disable the secure port.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "review-roles-with-aws-iam-authenticator", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" + }, + { + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", 
fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) 
== 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "ensure-image-scanning-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "DescribeRepositories" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" + }, + { + "name": "external-secret-storage", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" + }, + { + "name": "verify-image-signature", + "attributes": { + "armoBuiltin": true, + "useFromKubescapeVersion": "v2.1.3" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "ruleQuery": "armo_builtins", + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.trustedCosignPublicKeys", + "name": "Trusted Cosign public keys", + "description": "Trusted Cosign public keys" + } + ], + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := 
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" + }, + { + "name": "container-image-repository", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.imageRepositoryAllowList" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" + }, + { + "name": "exposed-sensitive-interfaces-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + 
"apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.sensitiveInterfaces" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveInterfaces", + "name": "Sensitive interfaces", + "description": "The following interfaces were seen exploited. Kubescape checks it they are externally exposed." + } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": 
[service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" + }, + { + "name": "linux-hardening", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" + }, + { + "name": "psp-deny-root-container", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == 
\"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" + }, + { + "name": "access-container-service-account-v1", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" + }, + { + "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + 
"remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of 
workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " + }, + { + "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 
2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" + }, + { + "name": "container-hostPort", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + }, + { + "name": "resources-cpu-limit-and-request", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.cpu_request_max", + "settings.postureControlInputs.cpu_request_min", + "settings.postureControlInputs.cpu_limit_min", + "settings.postureControlInputs.cpu_limit_max" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.cpu_request_max", + "name": "cpu_request_max", + "description": "Ensure CPU max requests are set" + }, + { + "path": "settings.postureControlInputs.cpu_request_min", + "name": "cpu_request_min", + "description": "Ensure CPU min requests are set" + }, + { + "path": "settings.postureControlInputs.cpu_limit_max", + "name": "cpu_limit_max", + "description": "Ensure CPU max limits are set" + }, + { + "path": "settings.postureControlInputs.cpu_limit_min", + "name": "cpu_limit_min", + "description": "Ensure CPU min limits are set" + } + ], + "description": "CPU limits and requests are not set.", + "remediation": "Ensure CPU limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not have container with CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": 
\"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n###################################################################################################################\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n \tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n#################################################################################################################3\n\nrequest_or_limit_cpu(container) {\n\tcontainer.resources.limits.cpu\n\tcontainer.resources.requests.cpu\n}\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resouces.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + }, + { + "name": "read-only-port-enabled-updated", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + 
], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "host-pid-ipc-privileges", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. 
Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" + }, + { + "name": "resources-core1-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap", + "Endpoints", + "LimitRange", + "PersistentVolumeClaim", + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Activate garbage collector on pod termination, as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result 
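`is_default_namespace` above returns a pair that later becomes either `failedPaths` (the namespace is explicitly `default`) or `fixPaths` (the namespace is omitted). A minimal sketch of that convention against literal metadata; the function and rule names below are illustrative, not part of the library:

```
package default_namespace_example

# Explicit "default" namespace: report the existing field as the failed path.
ns_result(metadata) = ["metadata.namespace", ""] {
	metadata.namespace == "default"
}

# Missing namespace: report a fix path proposing an explicit namespace.
ns_result(metadata) = ["", {"path": "metadata.namespace", "value": "YOUR_NAMESPACE"}] {
	not metadata.namespace
}

example_explicit := ns_result({"name": "my-configmap", "namespace": "default"})
example_missing := ns_result({"name": "my-configmap"})
```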
{\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" + }, + { + "name": "CVE-2022-0492", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Case 1: \n# -\tContainer runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND\n# -\tNo AppArmor , AND\n# -\tNo SELinux, AND\n# -\tNo Seccomp\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\t\n\t# Path to send\n\tbeggining_of_path := \"spec\"\n\t\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec\"\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n # If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# 
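The two `invalid_flag` clauses above distinguish a flag that is present (its index is reported for review) from one that is missing (a fix path proposes appending it after the last command element). A standalone sketch over literal commands, with illustrative names `flag_index` and `append_path`:

```
package flag_check_example

# Flag present: bind the index of the matching command element.
flag_index = i {
	cmd := ["kube-controller-manager", "--terminated-pod-gc-threshold=12500"]
	contains(cmd[i], "--terminated-pod-gc-threshold")
}

# Flag absent: the fix path points one past the last element, i.e. "append here".
append_path = path {
	cmd := ["kube-controller-manager", "--profiling=false"]
	not contains(concat(" ", cmd), "--terminated-pod-gc-threshold")
	path := sprintf("spec.containers[0].command[%v]", [count(cmd)])
}
```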
CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\n\tpod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n \n \t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################\n# Case 2: \n# - Container has CAP_DAC_OVERRIDE capability, AND\n# - No AppArmor, AND\n# - No SELinux\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.\"\n\t\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec.\"\n\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n \n pod := 
wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\n \tresult := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\nis_cap_sys_admin(container, beggining_of_path) {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"SYS_ADMIN\"\n}\n\nisCAP_DAC_OVERRIDE(container, beggining_of_path, i) = path {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"DAC_OVERRIDE\"\n path = sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) \n}\n\n\n\n#################################################################################\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n\n\n\n\n#################################################################################\n\n# Check if appArmor or SELinux or seccompProfile is used\n# Fails if none of them is used\nis_no_SELinux_No_AppArmor_Pod(pod){\n not pod.spec.securityContext.seLinuxOptions\n\tannotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tnot count(annotations) > 0\n}\n\nis_no_SELinux_container(container){\n not container.securityContext.seLinuxOptions\n}\n\nis_no_seccomp_pod(pod) {\n not pod.spec.securityContext.seccompProfile\n}\n\nis_no_Seccomp_Container(container) {\n not container.securityContext.seccompProfile\n}\n\n\n\n\n\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n\n# Checking for non-root and allowPrivilegeEscalation enabled\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot 
{\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.containers[container_ndx].runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := 
sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n" + }, + { + "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable profiling, if not needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. 
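The `get_*_value` helpers above all apply the same precedence: a container-level `securityContext` field wins, the pod-level field is consulted next, and only then does the rule assume the insecure default and emit fix paths. A condensed sketch of that precedence chain using an `else` cascade; `effective_run_as_non_root` and the literals are illustrative:

```
package security_context_example

# Container-level setting takes precedence ...
effective_run_as_non_root(container, pod) = value {
	value := container.securityContext.runAsNonRoot
} else = value {
	# ... then the pod-level setting ...
	value := pod.spec.securityContext.runAsNonRoot
} else = false {
	# ... otherwise assume the insecure default, as the rule above does.
	true
}

# The pod sets runAsNonRoot and the container does not: the pod value applies.
example := effective_run_as_non_root({"name": "app"}, {"spec": {"securityContext": {"runAsNonRoot": true}}})
```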
This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "rule-cni-enabled-aks", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" + }, + { + "name": "psp-deny-hostpid", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" 
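The ownership rule above uses `json.filter` to trim the host-sensor object down to the inspected file plus identifying fields before attaching it to the alert. A small sketch of that call on an illustrative `ControlPlaneInfo`-shaped literal (the field names in the literal are assumptions for the example):

```
package host_sensor_example

# An illustrative ControlPlaneInfo-shaped object as reported by the host sensor.
obj := {
	"apiVersion": "hostdata.kubescape.cloud/v1beta0",
	"kind": "ControlPlaneInfo",
	"metadata": {"name": "node-1"},
	"data": {
		"controllerManagerInfo": {"configFile": {"path": "/etc/kubernetes/controller-manager.conf", "ownership": {"username": "root", "groupname": "root"}}},
		"apiServerInfo": {"specsFile": {"path": "/etc/kubernetes/manifests/kube-apiserver.yaml"}}
	}
}

# json.filter keeps only the listed paths, so everything except the
# controller-manager config file entry and the identifying fields is dropped.
filtered := json.filter(obj, ["data/controllerManagerInfo/configFile", "apiVersion", "kind", "metadata"])
```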
+ ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "set-seccomp-profile", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
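The PSP rule above only fails when every PodSecurityPolicy in the input sets `hostPID` to true (one compliant PSP is enough to pass), and then reports each offending PSP individually. A reduced sketch of that every-then-enumerate pattern over a literal list; the package and rule names are illustrative:

```
package psp_every_example

import future.keywords.every
import future.keywords.in

psps := [
	{"kind": "PodSecurityPolicy", "metadata": {"name": "permissive-a"}, "spec": {"hostPID": true}},
	{"kind": "PodSecurityPolicy", "metadata": {"name": "permissive-b"}, "spec": {"hostPID": true}}
]

# Produces entries only because *all* PSPs here allow hostPID; each offending
# PSP is then reported on its own, as in the deny rule above.
offending[name] {
	every psp in psps {
		psp.spec.hostPID == true
	}
	psp := psps[_]
	name := psp.metadata.name
}
```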
fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "etcd-encryption-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" + }, + { + "name": "rule-can-delete-k8s-events-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", 
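`seccompProfile_not_defined` above relies on `object.get` with a path array and a default value, so a missing `securityContext` or `seccompProfile` at either level resolves to the default instead of leaving the rule undefined. A short sketch of that lookup on an illustrative container:

```
package seccomp_lookup_example

# An illustrative container with a securityContext but no seccompProfile.
container := {"name": "app", "image": "nginx", "securityContext": {"runAsNonRoot": true}}

# object.get walks the path and returns the default ("") when any segment is
# missing, so the check holds for the container above.
seccomp_profile_not_defined(spec) {
	object.get(spec, ["securityContext", "seccompProfile"], "") == ""
}

example_missing {
	seccomp_profile_not_defined(container)
}
```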
[rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "exposure-to-internet", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "extensions", + "networking.k8s.io" + ], + "apiVersions": [ + "v1beta1", + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := 
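The RBAC check above collects matching verbs, apiGroups and resources with list comprehensions and only proceeds when each list is non-empty, which also yields the exact failed paths. A condensed sketch of one such comprehension over a literal rule; the names are illustrative:

```
package rbac_paths_example

import future.keywords.in

# An illustrative RBAC rule granting delete on events.
rbac_rule := {"apiGroups": [""], "resources": ["events", "pods"], "verbs": ["get", "delete"]}

# One comprehension both filters the interesting verbs and records where each
# match was found, so the resulting paths double as evidence for the alert.
deletion_verb_paths := [sprintf("rules[0].verbs[%d]", [l]) |
	verb := rbac_rule.verbs[l]
	verb in ["delete", "deletecollection", "*"]
]

can_delete {
	count(deletion_verb_paths) > 0
}
```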
{\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" + }, + { + "name": "psp-deny-allowprivilegeescalation", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" + }, + { + "name": "psp-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security 
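`wl_connected_to_service` above matches a Service to a workload by requiring every selector key to agree with the workload labels (a set comprehension counted against the selector size), with a second clause for exact `matchLabels` equality. A standalone sketch of the comprehension form; literals and names are illustrative:

```
package service_selector_example

# Every selector key must appear in the labels with the same value; extra
# workload labels (e.g. pod-template-hash) do not affect the match.
selector_matches(selector, labels) {
	count({x | selector[x] == labels[x]}) == count(selector)
}

example_connected {
	selector_matches({"app": "web"}, {"app": "web", "pod-template-hash": "5d9f8c7b6"})
}
```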
policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" + }, + { + "name": "rule-can-ssh-to-pod-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": 
wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" + }, + { + "name": "ensure_network_policy_configured_in_labels", + 
"attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot 
metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
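`is_same_namespace` above treats a missing namespace as equivalent to `default`, so a NetworkPolicy in `default` still covers workloads whose manifests omit the namespace, and vice versa. A compact sketch that mirrors those four clauses with a single `object.get` default; the metadata literals are illustrative:

```
package namespace_match_example

# Missing namespace on either side is read as "default", mirroring the four
# is_same_namespace clauses above.
same_namespace(m1, m2) {
	object.get(m1, "namespace", "default") == object.get(m2, "namespace", "default")
}

# A NetworkPolicy in "default" and a Deployment whose manifest omits the
# namespace are treated as being in the same namespace.
example_matched {
	same_namespace({"name": "deny-all", "namespace": "default"}, {"name": "web"})
}
```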
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" + }, + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := 
is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" + }, + { + "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" + }, + { + "name": "resources-event-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "events.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Event" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "rule-secrets-in-env-var", + "attributes": { + "armoBuiltin": true + }, + 
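As an illustration of the rule-secrets-in-env-var check named just above (its Rego body follows below), here is a minimal opa-test sketch. It is an assumption for clarity, not part of this patch: the test name, the "demo" Pod and "my-secret" Secret are hypothetical, and the assertion assumes only this one rule is loaded in the armo_builtins package.

package armo_builtins

# Hypothetical unit test: a Pod whose container reads a Secret via
# env[].valueFrom.secretKeyRef should be flagged by the deny rule.
test_pod_with_secret_env_fails {
    pod := {
        "kind": "Pod",
        "metadata": {"name": "demo"},
        "spec": {"containers": [{
            "name": "app",
            "env": [{
                "name": "TOKEN",
                "valueFrom": {"secretKeyRef": {"name": "my-secret", "key": "token"}}
            }]
        }]}
    }

    # input is passed as a list because the rule iterates input[_]
    count(deny) > 0 with input as [pod]
}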
"ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "etcd-peer-client-auth-cert", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "etcd should be configured for peer authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" + }, + { + "name": "pod-security-admission-baseline-applied", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security 
admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" + }, + { + "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the 
below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := 
sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "rule-can-update-configmap-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can update/patch the 'coredns' configmap", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", 
[rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "rule-access-dashboard-wl-v1", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "armoBuiltin": true, + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": 
[\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "list-role-definitions-in-acr", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + } + ], + "relevantCloudProviders": [ + "AKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" + }, + { + "name": "CVE-2022-24348", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, 
\":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" + }, + { + "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-manual", + "attributes": { + "armoBuiltin": true, + "actionRequired": "manual review", + "hostSensorRule": false, + "imageScanRelated": false + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": 
[],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" + }, + { + "name": "containers-mounting-docker-socket", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n" + }, + { + "name": "workload-mounted-configmap", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts ConfigMaps", + "remediation": "", + 
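To make the workload-mounted-configmap check concrete before its Rego body below, here is a minimal opa-test sketch. It is an assumption for clarity, not part of this patch: the "demo" Pod and "app-config" ConfigMap names are hypothetical, and the assertion assumes only this one rule is loaded in the armo_builtins package.

package armo_builtins

# Hypothetical unit test: a Pod mounting a ConfigMap from the same
# namespace is expected to be flagged by the deny rule.
test_pod_mounting_configmap_fails {
    cm := {"kind": "ConfigMap", "metadata": {"name": "app-config", "namespace": "default"}}
    pod := {
        "kind": "Pod",
        "metadata": {"name": "demo", "namespace": "default"},
        "spec": {
            "containers": [{"name": "app", "volumeMounts": [{"name": "cfg", "mountPath": "/etc/app"}]}],
            "volumes": [{"name": "cfg", "configMap": {"name": "app-config"}}]
        }
    }

    # the rule matches only when both the workload and the referenced
    # ConfigMap are present in the input
    count(deny) > 0 with input as [pod, cm]
}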
"ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + }, + { + "name": "rbac-enabled-native", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if psp is 
enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" + }, + { + "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Do not use token based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication cannot be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the API server --token-auth-file argument is set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", 
\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "exposed-sensitive-interfaces", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "configInputs": [ + "settings.postureControlInputs.servicesNames" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.servicesNames", + "name": "Service names", + "description": "Kubescape will look for the following services that exposes sensitive interfaces of common K8s projects/applications" + } + ], + "description": "fails if known interfaces have exposed services", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\tresult := wl_connectedto_service(wl, service)\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n 
services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tresult := wl_connectedto_service(pod, service)\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod, service]\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n\tresult := wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n" + }, + { + "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := 
{\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "configured-readiness-probe", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Readiness probe is not configured", + "remediation": "Ensure Readiness probe is configured", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "rule-can-impersonate-users-groups", + 
"attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "resourceCount": "subjects", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"impersonate\")\n}\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"*\")\n}\n\n\ncanImpersonateResource(rule) 
{\n\tcautils.list_contains(rule.resources,\"users\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"serviceaccounts\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"groups\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"uids\")\n}\n\ncanImpersonateResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" + }, + { + "name": "rule-can-ssh-to-pod", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "armoBuiltin": true, + "useUntilKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n 
\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n" + }, + { + "name": "ensure-default-service-accounts-has-only-default-roles", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. \ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" + }, + { + "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "CVE-2022-0185", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "LinuxKernelVariables" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n kernel_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", node.status.nodeInfo.kernelVersion, -1)\n kernelVersion := kernel_version_match[0][0]\n \n kernel_version_arr := split(kernelVersion, \".\")\n to_number(kernel_version_arr[0]) == 5\n to_number(kernel_version_arr[1]) >= 1\n to_number(kernel_version_arr[1]) <= 16\n to_number(kernel_version_arr[2]) < 2 \n \n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": 
external_vector\n },\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n "fixPaths":[],\n\t}\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" + }, + { + "name": "CVE-2022-47633", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "description": "fails if the cluster is running a Kyverno version that is vulnerable to CVE-2022-47633 (policy signature verification bypass)", + "remediation": "Update Kyverno to a version later than v1.8.4", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8sApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8sApiObjects\": [deployment]\n },\n\t\t}\n}\n" + }, + { + "name": "non-root-containers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can 
run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, 
beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": 
\"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + }, + { + "name": "serviceaccount-token-mount", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n beggining_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, beggining_of_path, [])\n\n wl_namespace := wl.metadata.namespace\n result := is_sa_auto_mounted(spec, beggining_of_path, wl_namespace)\n \n sa := input[_]\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata.namespace , wl_namespace)\n has_service_account_binding(sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"failedPaths\": 
failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = beggining_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n beggining_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"Pod\"\n beggining_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"CronJob\"\n beggining_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n paths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n" + }, + { + "name": 
"ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Reject creating objects in a namespace that is undergoing termination.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": 
[ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" + }, + { + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not 
found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" + }, + { + "name": "resources-memory-limit-and-request", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.memory_request_max", + "settings.postureControlInputs.memory_request_min", + "settings.postureControlInputs.memory_limit_max", + "settings.postureControlInputs.memory_limit_min" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.memory_request_max", + "name": "memory_request_max", + "description": "Ensure memory max requests are set" + }, + { + "path": "settings.postureControlInputs.memory_request_min", + "name": "memory_request_min", + "description": "Ensure memory min requests are set" + }, + { + "path": "settings.postureControlInputs.memory_limit_max", + "name": "memory_limit_max", + "description": "Ensure memory max limits are set" + }, + { + "path": "settings.postureControlInputs.memory_limit_min", + "name": "memory_limit_min", + "description": "Ensure memory min limits are set" + } + ], + "description": "memory limits and requests are not set.", + "remediation": "Ensure memory limits and requests are set.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not have container with memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with 
memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nrequest_or_limit_memory(container) {\n\tcontainer.resources.limits.memory\n\tcontainer.resources.requests.memory\n}\n\n######################################################################################################\n\n# Fails if pod exceeds memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\nis_min_max_exceeded_memory(container) = \"resources.limits.memory\" {\n\tmemory_limit := 
container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n} else = \"resources.requests.memory\" {\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n} else = \"\" {\n\ttrue\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max := data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check whether the value uses a known unit suffix\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" + }, + { + "name": "rule-allow-privilege-escalation", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "*" + ], + "resources": [ + 
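The memory rule above only treats values as comparable when they share a unit suffix with the configured `memory_*` posture inputs (`Mi`, `M`, or `m`), so a workload that should pass both the presence check and the min/max checks needs requests and limits in the same unit as those inputs. A minimal sketch of such a Pod, with hypothetical names and values, assuming the posture inputs are expressed in `Mi`:

```
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo              # hypothetical name
spec:
  containers:
    - name: app
      image: nginx:1.25          # illustrative image
      resources:
        requests:
          memory: 128Mi          # request is set, so request_or_limit_memory() holds
        limits:
          memory: 256Mi          # limit is set; compared against memory_limit_max/min when those also use Mi
```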
"PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container allows privilege escalation", + "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == 
false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "rule-can-create-pod", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users can create pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "audit-policy-content", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n#rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must 
contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit levels\n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" + }, + { + "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Validate service account before validating token.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the --service-account-lookup argument is set to false\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "exec-into-container-v1", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": 
"Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + }, + { + "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Always pull images.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "kubelet-hostname-override", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := 
kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" + }, + { + "name": "resources-core2-in-default-namespace", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController", + "ResourceQuota", + "ServiceAccount", + "Service" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" + }, + { + "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "description": "Encrypt etcd key-value store.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
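The `resources-core2-in-default-namespace` rule above fails core objects whose `metadata.namespace` is `default` or missing, and its fixPath suggests adding an explicit namespace. A minimal sketch with hypothetical names (the rule also matches ReplicationController, ResourceQuota and ServiceAccount):

```
apiVersion: v1
kind: Service
metadata:
  name: backend                  # hypothetical name
  namespace: team-a              # explicit, non-default namespace, so neither is_default_namespace clause matches
spec:
  selector:
    app: backend
  ports:
    - port: 80
      targetPort: 8080
```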
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" + }, + { + "name": "kubelet-strong-cryptographics-ciphers", + "attributes": { + "armoBuiltin": true, + "hostSensorRule": "true" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + 
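The encryption-provider-config rule above decodes the referenced file and fails when no `resources` entry lists `secrets`. A minimal sketch of an `EncryptionConfiguration` that would satisfy that check; the provider choice (`aescbc`), key name and placeholder key are assumptions for illustration only:

```
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets                  # the rule requires "secrets" to appear in at least one resources list
    providers:
      - aescbc:
          keys:
            - name: key1         # hypothetical key name
              secret: <base64-encoded 32-byte key>
      - identity: {}             # fallback for reading data written before encryption was enabled
```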
"description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", + "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) 
{\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" + }, + { + "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove `DenyServiceExternalIPs` from the `--enable-admission-plugins` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, the `DenyServiceExternalIPs` admission plugin is not enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "rule-identify-old-k8s-registry", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Identifying if pod container images are from deprecated K8s registry", + "remediation": "Use images new registry", + "ruleQuery": "", + "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\n" + }, + { + "name": "host-network-access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + 
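To illustrate `rule-identify-old-k8s-registry` above: an image under `kube-system` is flagged when it starts with `k8s.gcr.io/`, and the remediation is to reference the same image from the current registry. A sketch with a hypothetical pod name and tag:

```
apiVersion: v1
kind: Pod
metadata:
  name: coredns-test                                    # hypothetical name
  namespace: kube-system                                # the rule only inspects kube-system
spec:
  containers:
    - name: coredns
      # image: k8s.gcr.io/coredns/coredns:v1.10.1       would be flagged by deprecated_registry()
      image: registry.k8s.io/coredns/coredns:v1.10.1    # current registry, passes the check
```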
"apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + }, + { + "name": "k8s-audit-logs-enabled-cloud", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "relevantCloudProviders": [ + "EKS", + "GKE" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" + }, + { + "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", + "attributes": { + "armoBuiltin": true, + "useFromKubescapeVersion": "v2.2.5" + }, + "ruleLanguage": "Rego", + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "relevantCloudProviders": [ + "EKS" + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == 
\"eks\"\n\n\t#node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" + }, + { + "name": "rule-can-portforward-v1", + "attributes": { + "armoBuiltin": true, + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == 
subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" + } +] \ No newline at end of file diff --git a/releaseDev/security.json b/releaseDev/security.json new file mode 100644 index 000000000..473f2af62 --- /dev/null +++ b/releaseDev/security.json @@ -0,0 +1,1949 @@ +{ + "name": "security", + "description": "Controls that are used to assess security threats.", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "version": null, + "controls": [ + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "resource-policies", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if namespace has no resource policies defined", + "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if 
container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" + } + ] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + 
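For the Resource limits control (C-0009) above, a minimal sketch of a Deployment that the resource-policies rule accepts; the workload name, image, and limit/request values are illustrative assumptions, not taken from the control's example file:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: limited-app            # hypothetical workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: limited-app
  template:
    metadata:
      labels:
        app: limited-app
    spec:
      containers:
      - name: web
        image: nginx:1.25      # illustrative image
        resources:
          limits:              # both cpu and memory limits are set, so no fixPaths are produced
            cpu: 500m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 128Mi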
"attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + } + ] + }, + { + "name": "Exposure to Internet", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Workload Exposure" + ] + }, + { + "attackTrack": "", + "categories": [ + "" + ] + } + ] + }, + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "exposure-to-internet", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "extensions", + "networking.k8s.io" + ], + "apiVersions": [ + "v1beta1", + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected 
to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" + } + ] + }, + { + "name": "Workload with credential access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", + "controlID": "C-0259", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "rule-credentials-in-env-var", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.sensitiveKeyNames", + "settings.postureControlInputs.sensitiveValuesAllowed" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Keys", + "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
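For the Exposure to Internet control (C-0256) above, a sketch of a Service that does not trigger the rule; the Service name, selector, and ports are assumptions:

apiVersion: v1
kind: Service
metadata:
  name: internal-svc        # hypothetical name
spec:
  type: ClusterIP           # NodePort or LoadBalancer here would make is_exposed_service match
  selector:
    app: my-app             # must match the workload's labels for wl_connected_to_service to link them
  ports:
  - port: 80
    targetPort: 8080

An Ingress whose backend points at this Service would still expose the workload and be reported by the second deny rule.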
Here you can find some examples of popular key phrases that Kubescape is searching for" + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "AllowedValues", + "description": "Allowed values" + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := 
data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" + } + ] + }, + { + "name": "Workload with configMap access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Access" + ] + } + ] + }, + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", + "controlID": "C-0258", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "workload-mounted-configmap", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts ConfigMaps", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", 
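For the Workload with credential access control (C-0259) above, a sketch of the remediation: the credential is referenced from a Secret instead of being inlined, so the rule's is_not_reference check does not hold and no alert is raised. The variable, Secret, and key names are assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: env-from-secret        # hypothetical name
spec:
  containers:
  - name: app
    image: nginx:1.25          # illustrative image
    env:
    - name: DB_PASSWORD        # a key name the sensitiveKeyNames list would typically match
      valueFrom:
        secretKeyRef:          # referenced, not inlined as env.value
          name: db-credentials # hypothetical Secret in the same namespace
          key: password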
\"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Workload with PVC access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Access" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. 
Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "workload-mounted-pvc", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts PVC", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = 
[\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "Missing network policy", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Network" + ] + } + ] + }, + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure_network_policy_configured_in_labels", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if 
the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "ServiceAccount token mounted", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. 
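For the Missing network policy control (C-0260) above, a minimal NetworkPolicy sketch whose podSelector matches a hypothetical workload, which is what connected_to_network_policy looks for; the namespace, labels, and ingress rule are assumptions:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-app-netpol          # hypothetical name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: my-app              # every key here must also appear on the workload's pod template labels
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: my-app-client   # hypothetical allowed client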
Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", + "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", + "controlID": "C-0261", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "serviceaccount-token-mount", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n beggining_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, beggining_of_path, [])\n\n wl_namespace := wl.metadata.namespace\n result := is_sa_auto_mounted(spec, beggining_of_path, wl_namespace)\n \n sa := input[_]\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata.namespace , wl_namespace)\n has_service_account_binding(sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"failedPaths\": failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = beggining_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n beggining_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"Pod\"\n beggining_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"CronJob\"\n beggining_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n 
paths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n" + } + ] + }, + { + "name": "Workload with secret access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. 
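For the ServiceAccount token mounted control (C-0261) above, a sketch of the workload-level remediation; the rule's fix_path points at exactly this field, and the names are illustrative:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: no-token-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: no-token-app
  template:
    metadata:
      labels:
        app: no-token-app
    spec:
      automountServiceAccountToken: false   # the field the rule wants set to false
      serviceAccountName: app-sa            # hypothetical ServiceAccount
      containers:
      - name: app
        image: nginx:1.25                   # illustrative image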
Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", + "controlID": "C-0255", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "workload-mounted-secrets", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Secret" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "description": "fails if workload mounts secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", 
\"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" + } + ] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "host-network-access", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if pod has hostNetwork enabled", + "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := 
\"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" + } + ] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "container-hostPort", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" + } + ] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + 
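For the Container hostPort control (C-0044) above, a sketch of the suggested alternative: no hostPort on the container, with exposure handled by a Service instead; the names, ports, and Service type are assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: no-hostport-demo       # hypothetical name
  labels:
    app: no-hostport-demo
spec:
  containers:
  - name: app
    image: nginx:1.25          # illustrative image
    ports:
    - containerPort: 8080      # containerPort only; no hostPort, so is_host_port finds no path
---
apiVersion: v1
kind: Service
metadata:
  name: no-hostport-demo
spec:
  type: ClusterIP              # or NodePort, the options named in the remediation
  selector:
    app: no-hostport-demo
  ports:
  - port: 80
    targetPort: 8080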
"categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, 
volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " + } + ] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). 
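For the Writable hostPath mount control (C-0045) above, a sketch of the remediated form: if a hostPath volume is genuinely required, its mount is marked readOnly: true, which is the fix path the rule produces; the host path and names are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: hostpath-readonly-demo # hypothetical name
spec:
  containers:
  - name: app
    image: nginx:1.25          # illustrative image
    volumeMounts:
    - name: host-logs
      mountPath: /var/log/host
      readOnly: true           # the field the rule's fix_path sets to "true"
  volumes:
  - name: host-logs
    hostPath:
      path: /var/log           # illustrative host path
      type: Directory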
", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "insecure-capabilities", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "configInputs": [ + "settings.postureControlInputs.insecureCapabilities" + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." + } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | 
capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" + } + ] + }, + { + "name": "HostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "alert-any-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, 
i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" + } + ] + }, + { + "name": "Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc.) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields are set according to the recommendations in the CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [ + { + "name": "rule-privilege-escalation", + "attributes": { + "m$K8sThreatMatrix": "Privilege Escalation::privileged container", + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "determines if pods/deployments are defined as privileged", + "remediation": "Avoid defining pods as privileged", + "ruleQuery": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following 
pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" + }, + { + "name": "immutable-container-filesystem", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if 
container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
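For illustration only (pod and image names are hypothetical), a spec that passes this rule sets readOnlyRootFilesystem to true and mounts an emptyDir for any path the application still has to write to:

apiVersion: v1
kind: Pod
metadata:
  name: readonly-rootfs-pod               # hypothetical name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0   # hypothetical image
    securityContext:
      readOnlyRootFilesystem: true        # the field this rule checks
    volumeMounts:
    - name: tmp
      mountPath: /tmp                     # writable scratch directory instead of a mutable root filesystem
  volumes:
  - name: tmp
    emptyDir: {}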
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" + }, + { + "name": "non-root-containers", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, 
\"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" + }, + { + "name": "drop-capability-netraw", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + 
"apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", 
path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" + }, + { + "name": "set-seLinuxOptions", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if workload and container do not define any seLinuxOptions", + "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-seccomp-profile", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container level.", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, 
path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" + }, + { + "name": "set-procmount-default", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first 
if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" + }, + { + "name": "set-fsgroup-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" + }, + { + "name": "set-fsgroupchangepolicy-value", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.fsGroupChangePolicy is not set.", + "remediation": "Set securityContext.fsGroupChangePolicy value", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" + }, + { + "name": "set-systctls-params", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.systctls is not set.", + "remediation": "Set securityContext.systctls params", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not 
set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + }, + { + "name": "set-supplementalgroups-values", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB 
###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" + } + ] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [ + { + "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [], + "ruleDependencies": [], + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", + "ruleQuery": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := 
sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" + }, + { + "name": "anonymous-access-enabled", + "attributes": { + "armoBuiltin": true + }, + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "description": "Fails in case anonymous access is enabled on the cluster", + "remediation": "Disable anonymous access by passing the --anonymous-auth=false flag to the kube-apiserver component, or if it's a managed cluster, you can remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", + "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n\n isAnonymous(rolebinding)\n\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:anonymous\"\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:unauthenticated\"\n}\n" + } + ] + } + ], + "ControlsIDs": [ + "C-0009", + "C-0017", + "C-0256", + "C-0259", + "C-0258", + "C-0257", + "C-0260", + "C-0261", + "C-0255", + "C-0041", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0211", + "C-0262" + ] +} \ No newline at end of file diff --git a/releaseDev/security_frameworks.json b/releaseDev/security_frameworks.json new file mode 100644 index 000000000..d754710dd --- /dev/null +++ b/releaseDev/security_frameworks.json @@ -0,0 +1,520 @@ +[ + { + "name": "security", + "description": "Controls that are used to assess security threats.", + "attributes": { + "armoBuiltin": true + }, + "typeTags": [ + "security" + ], + "version": null, + "controls": [ + { + "name": "Resource limits", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - service destruction" + ] + } + ] + }, + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", + "controlID": "C-0009", + "baseScore": 7.0, + "example": "@controls/examples/c009.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Immutable container filesystem", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Execution", + "Persistence" + ] + } + ] + }, + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", + "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", + "controlID": "C-0017", + "baseScore": 3.0, + "example": "@controls/examples/c017.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Exposure to Internet", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Workload Exposure" + ] + }, + { + "attackTrack": "", + "categories": [ + "" + ] + } + ] + }, + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", + "controlID": "C-0256", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with credential access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", + "controlID": "C-0259", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with configMap access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Access" + ] + } + ] + }, + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", + "controlID": "C-0258", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with PVC access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Access" + ] + } + ] + }, + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", + "controlID": "C-0257", + "baseScore": 4.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Missing network policy", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Network" + ] + } + ] + }, + "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "test": "Check that all workloads has a network policy configured in labels.", + "controlID": "C-0260", + "baseScore": 5.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "ServiceAccount token mounted", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", + "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", + "controlID": "C-0261", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Workload with secret access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", + "controlID": "C-0255", + "baseScore": 8.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostNetwork access", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Discovery", + "Lateral movement", + "Impact - service access" + ] + } + ] + }, + "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", + "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those PODs that must have access to host network by design.", + "long_description": "We have it in ArmoBest", + "test": "", + "controlID": "C-0041", + "baseScore": 7.0, + "example": "@controls/examples/c041.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Container hostPort", + "attributes": { + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "devops" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Initial access" + ] + } + ] + }, + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", + "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", + "controlID": "C-0044", + "baseScore": 4.0, + "example": "@controls/examples/c044.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Writable hostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Persistence", + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", + "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", + "controlID": "C-0045", + "baseScore": 8.0, + "example": "@controls/examples/c045.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Privilege escalation" + ] + } + ] + }, + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", + "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", + "controlID": "C-0046", + "baseScore": 7.0, + "example": "@controls/examples/c046.yaml", + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "HostPath mount", + "attributes": { + "armoBuiltin": true, + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "container", + "categories": [ + "Impact - Data access in container" + ] + } + ] + }, + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", + "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "controlID": "C-0048", + "baseScore": 7.0, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + }, + { + "name": "Apply Security Context to Your Pods and Containers", + "controlID": "C-0211", + "description": "Apply Security Context to Your Pods and Containers", + "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", + "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "references": [ + "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" + ], + "attributes": { + "armoBuiltin": true + }, + "baseScore": 8, + "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "default_value": "By default, no security contexts are automatically applied to pods.", + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "rules": [] + }, + { + "controlID": "C-0262", + "name": "Anonymous access enabled", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", + "attributes": { + "armoBuiltin": true + }, + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "rules": [] + } + ], + "ControlsIDs": [ + "C-0009", + "C-0017", + "C-0256", + "C-0259", + "C-0258", + "C-0257", + "C-0260", + "C-0261", + "C-0255", + "C-0041", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0211", + "C-0262" + ] + } +] \ No newline at end of file From 7d560352ce2d523bd376b63fe3e1fc31f59dace4 Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Thu, 20 Jul 2023 12:52:28 +0300 Subject: [PATCH 3/8] just for system test - check new scope - change branch Signed-off-by: rcohencyberarmor --- gitregostore/datastructures.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gitregostore/datastructures.go b/gitregostore/datastructures.go index 685d4b9da..6218ca55f 100644 --- a/gitregostore/datastructures.go +++ b/gitregostore/datastructures.go @@ -99,7 +99,7 @@ func (gs *GitRegoStore) SetRegoObjects() error { // NewDefaultGitRegoStore - generates git store object for production regolibrary release files. 
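// Here `frequency` presumably controls how often the store refreshes the release files,
// and the branch argument passed to NewGitRegoStore below selects the branch they are fetched from.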
// Release files source: "https://github.com/kubescape/regolibrary/releases/latest/download" func NewDefaultGitRegoStore(frequency int) *GitRegoStore { - gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releaseDev", "/", "scanning-scope-support", frequency) + gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releaseDev", "/", "scanning-scope", frequency) return gs } From c99cc97ba1af9e1cd3836c23739c07eafbe9fb0c Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Thu, 20 Jul 2023 13:35:53 +0300 Subject: [PATCH 4/8] add scope to readme Signed-off-by: rcohencyberarmor --- README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5eb8cd968..2fc09f55a 100644 --- a/README.md +++ b/README.md @@ -70,11 +70,18 @@ Example of a control: "test": "Check that there are no pods in the 'default' namespace", "id": "C-0061", "controlID": "C-0061", - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } ``` * Attribute `"armoBuiltin": true` - mandatory for armo rules. Only ARMO team members are authorized to create builtin objects. * `rulesNames` - List of rules to run, must be exact name. Use copy-paste to be sure. +* `scanningScope` - this control will run just if kubescape scan process match to the scope in the list.(for example the control above will run if the running kubescape scan is for scanning cluster or file) * `long_description`, `test` and other control fields are used mainly in the [documentation](https://hub.armosec.io/docs) From 87b1e0386559413d494d3998e1fba1f7e43a76d8 Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Thu, 20 Jul 2023 13:37:54 +0300 Subject: [PATCH 5/8] remove print + add function description Signed-off-by: rcohencyberarmor --- scripts/validations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validations.py b/scripts/validations.py index 065acf370..b1e9b0212 100644 --- a/scripts/validations.py +++ b/scripts/validations.py @@ -26,6 +26,7 @@ def validate_controls_in_framework(): # validate control exists and name is according to convention assert control_id in CONTROLID_TO_FILENAME, f"No file found for Control ID {control_id}." +# validate if each control has scanning scope and allowed one def validate_control_scanning_scope(control): allowed_scopes = [["cluster", "file"], ["cluster"], ["cloud"], ["GKE"], ["EKS"], ["AKS"]] controlID=control["controlID"] @@ -87,7 +88,6 @@ def fill_controlID_to_filename_map(): # Load the JSON files if filename.endswith('.json'): with open(os.path.join(CONTROLS_DIR, filename)) as f1: - print(filename) cntl = json.load(f1) CONTROLID_TO_FILENAME[cntl['controlID']] = filename From d7fc8e6e71a4563a20c206b2fcf9a5324d839656 Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Thu, 20 Jul 2023 13:43:08 +0300 Subject: [PATCH 6/8] add allowed scopes to readme Signed-off-by: rcohencyberarmor --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2fc09f55a..bf0df84da 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,8 @@ Example of a control: ``` * Attribute `"armoBuiltin": true` - mandatory for armo rules. Only ARMO team members are authorized to create builtin objects. * `rulesNames` - List of rules to run, must be exact name. Use copy-paste to be sure. 
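The `scanningScope` field described here is also enforced by `scripts/validations.py` (see PATCH 5/8 above). Below is a minimal sketch of that check — only the `allowed_scopes` values and the function name are taken from the patch; the function body is illustrative rather than the repository's exact implementation:

```python
# Simplified sketch of the scanningScope check in scripts/validations.py.
# Only `allowed_scopes` and the function name come from the patch; the body
# below is illustrative and may differ from the real implementation.
allowed_scopes = [["cluster", "file"], ["cluster"], ["cloud"], ["GKE"], ["EKS"], ["AKS"]]

def validate_control_scanning_scope(control):
    control_id = control["controlID"]
    scope = control.get("scanningScope")
    assert scope is not None, f"Control {control_id} has no scanningScope."
    assert scope.get("matches") in allowed_scopes, \
        f"Control {control_id} scanningScope {scope.get('matches')} is not an allowed scope."

# The C-0061 example above passes, since ["cluster", "file"] is an allowed scope.
validate_control_scanning_scope(
    {"controlID": "C-0061", "scanningScope": {"matches": ["cluster", "file"]}}
)
```
At scan time the same `matches` list is what Kubescape compares against the current scan target (cluster, file, or a specific cloud provider) to decide whether the control runs.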
-* `scanningScope` - this control will run just if kubescape scan process match to the scope in the list.(for example the control above will run if the running kubescape scan is for scanning cluster or file) +* `scanningScope` - this control will run just if kubescape scan process match to the scope in the list.(for example the control above will run if the running kubescape scan is for scanning cluster or file) - list of allowed scanning scope ``` [["cluster", "file"], ["cluster"], ["cloud"], ["GKE"], ["EKS"], ["AKS"]] ``` + * `long_description`, `test` and other control fields are used mainly in the [documentation](https://hub.armosec.io/docs) From 74bbf63a4206e161c587233a091895c17dfffd57 Mon Sep 17 00:00:00 2001 From: rcohencyberarmor Date: Thu, 20 Jul 2023 13:46:24 +0300 Subject: [PATCH 7/8] remove realeaseDev dir Signed-off-by: rcohencyberarmor --- releaseDev/ControlID_RuleName.csv | 276 - releaseDev/FWName_CID_CName.csv | 408 - releaseDev/allcontrols.json | 5496 ------------ releaseDev/armobest.json | 3358 ------- releaseDev/attack_tracks.json | 59 - releaseDev/cis-aks-t1.2.0.json | 4072 --------- releaseDev/cis-eks-t1.2.0.json | 4299 --------- releaseDev/cis-v1.23-t1.0.1.json | 7883 ---------------- releaseDev/controls.json | 6728 -------------- releaseDev/default_config_inputs.json | 140 - releaseDev/devopsbest.json | 987 -- releaseDev/exceptions.json | 6854 -------------- releaseDev/frameworks.json | 11419 ------------------------ releaseDev/mitre.json | 2616 ------ releaseDev/nsa.json | 2145 ----- releaseDev/rules.json | 8953 ------------------- releaseDev/security.json | 1949 ---- releaseDev/security_frameworks.json | 520 -- 18 files changed, 68162 deletions(-) delete mode 100644 releaseDev/ControlID_RuleName.csv delete mode 100644 releaseDev/FWName_CID_CName.csv delete mode 100644 releaseDev/allcontrols.json delete mode 100644 releaseDev/armobest.json delete mode 100644 releaseDev/attack_tracks.json delete mode 100644 releaseDev/cis-aks-t1.2.0.json delete mode 100644 releaseDev/cis-eks-t1.2.0.json delete mode 100644 releaseDev/cis-v1.23-t1.0.1.json delete mode 100644 releaseDev/controls.json delete mode 100644 releaseDev/default_config_inputs.json delete mode 100644 releaseDev/devopsbest.json delete mode 100644 releaseDev/exceptions.json delete mode 100644 releaseDev/frameworks.json delete mode 100644 releaseDev/mitre.json delete mode 100644 releaseDev/nsa.json delete mode 100644 releaseDev/rules.json delete mode 100644 releaseDev/security.json delete mode 100644 releaseDev/security_frameworks.json diff --git a/releaseDev/ControlID_RuleName.csv b/releaseDev/ControlID_RuleName.csv deleted file mode 100644 index 0ba1687a1..000000000 --- a/releaseDev/ControlID_RuleName.csv +++ /dev/null @@ -1,276 +0,0 @@ -ControlID,RuleName -C-0016,rule-allow-privilege-escalation -C-0174,enforce-kubelet-client-tls-authentication-updated -C-0100,ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive -C-0140,ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate -C-0015,rule-can-list-get-secrets -C-0015,rule-can-list-get-secrets-v1 -C-0164,if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive -C-0181,validate-kubelet-tls-configuration-updated -C-0192,pod-security-admission-applied -C-0017,immutable-container-filesystem -C-0144,ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate -C-0054,internal-networking 
-C-0170,if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive -C-0143,ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers -C-0199,pod-security-admission-baseline-applied -C-0200,pod-security-admission-restricted-applied -C-0152,ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1 -C-0014,rule-access-dashboard -C-0014,rule-access-dashboard-subject-v1 -C-0014,rule-access-dashboard-wl-v1 -C-0142,ensure-that-the-api-server-encryption-providers-are-appropriately-configured -C-0205,ensure-that-the-cni-in-use-supports-network-policies -C-0188,rule-can-create-pod -C-0036,list-all-validating-webhooks -C-0115,ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set -C-0065,rule-can-impersonate-users-groups -C-0065,rule-can-impersonate-users-groups-v1 -C-0133,ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate -C-0105,ensure-that-the-admin.conf-file-ownership-is-set-to-root-root -C-0249,rule-manual -C-0163,ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root -C-0034,automount-service-account -C-0254,rule-manual -C-0203,pod-security-admission-baseline-applied -C-0086,CVE-2022-0492 -C-0229,ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks -C-0083,exposed-critical-pods -C-0114,ensure-that-the-api-server-token-auth-file-parameter-is-not-set -C-0189,automount-default-service-account -C-0189,namespace-without-service-account -C-0215,psp-deny-hostipc -C-0176,kubelet-streaming-connection-idle-timeout -C-0099,ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root -C-0241,ensure-azure-rbac-is-set -C-0212,pods-in-default-namespace -C-0212,resources-rbac-in-default-namespace -C-0212,resources-core1-in-default-namespace -C-0212,resources-core2-in-default-namespace -C-0212,resources-other1-in-default-namespace -C-0212,resources-other2-in-default-namespace -C-0212,resources-secret-in-default-namespace -C-0212,resources-event-in-default-namespace -C-0138,ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate -C-0235,ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive -C-0001,rule-identify-blocklisted-image-registries -C-0078,container-image-repository -C-0102,ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive -C-0058,Symlink-Exchange-Can-Allow-Host-Filesystem-Access -C-0222,ensure-aws-policies-are-present -C-0031,rule-can-delete-k8s-events -C-0031,rule-can-delete-k8s-events-v1 -C-0147,ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate -C-0063,rule-can-portforward -C-0063,rule-can-portforward-v1 -C-0256,exposure-to-internet -C-0247,restrict-access-to-the-control-plane-endpoint -C-0248,ensure-clusters-are-created-with-private-nodes -C-0246,rule-manual -C-0239,ensure-default-service-accounts-has-only-default-roles -C-0153,etcd-tls-enabled -C-0013,non-root-containers -C-0213,psp-deny-privileged-container -C-0055,linux-hardening -C-0132,ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate -C-0244,secret-etcd-encryption-cloud -C-0190,automount-service-account -C-0127,ensure-that-the-admission-control-plugin-NodeRestriction-is-set -C-0183,kubelet-rotate-kubelet-server-certificate -C-0117,ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate 
-C-0141,ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate -C-0198,pod-security-admission-restricted-applied -C-0093,ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root -C-0169,ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root -C-0123,ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set -C-0116,ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate -C-0193,pod-security-admission-baseline-applied -C-0077,K8s common labels usage -C-0148,ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate -C-0204,pod-security-admission-baseline-applied -C-0038,host-pid-ipc-privileges -C-0245,encrypt-traffic-to-https-load-balancers-with-tls-certificates -C-0261,serviceaccount-token-mount -C-0113,ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false -C-0258,workload-mounted-configmap -C-0045,alert-rw-hostpath -C-0118,ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow -C-0233,alert-fargate-not-in-use -C-0020,alert-mount-potential-credentials-paths -C-0214,psp-deny-hostpid -C-0243,ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider -C-0250,ensure-service-principle-has-read-only-permissions -C-0186,rule-can-list-get-secrets-v1 -C-0135,ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true -C-0173,kubelet-authorization-mode-alwaysAllow -C-0049,internal-networking -C-0225,ensure-default-service-accounts-has-only-default-roles -C-0225,automount-default-service-account -C-0002,exec-into-container -C-0002,exec-into-container-v1 -C-0037,rule-can-update-configmap -C-0037,rule-can-update-configmap-v1 -C-0062,sudo-in-container-entrypoint -C-0134,ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate -C-0151,ensure-that-the-scheduler-profiling-argument-is-set-to-false -C-0157,etcd-peer-client-auth-cert -C-0101,ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root -C-0042,rule-can-ssh-to-pod -C-0042,rule-can-ssh-to-pod-v1 -C-0059,nginx-ingress-snippet-annotation-vulnerability -C-0171,ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root -C-0124,ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used -C-0073,naked-pods -C-0150,ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1 -C-0091,CVE-2022-47633 -C-0194,pod-security-admission-baseline-applied -C-0053,access-container-service-account -C-0053,access-container-service-account-v1 -C-0052,instance-metadata-api-access -C-0103,ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd -C-0210,set-seccomp-profile-RuntimeDefault -C-0090,CVE-2022-39328 -C-0050,resources-cpu-limit-and-request -C-0079,CVE-2022-0185 -C-0187,rule-list-all-cluster-admins-v1 -C-0104,ensure-that-the-admin.conf-file-permissions-are-set-to-600 -C-0251,list-role-definitions-in-acr -C-0085,excessive_amount_of_vulnerabilities_pods -C-0226,alert-container-optimized-os-not-in-use -C-0009,resource-policies -C-0012,rule-credentials-in-env-var -C-0012,rule-credentials-configmap -C-0221,ensure-image-scanning-enabled-cloud -C-0180,kubelet-event-qps -C-0129,ensure-that-the-api-server-profiling-argument-is-set-to-false -C-0161,audit-policy-content -C-0044,container-hostPort -C-0195,pod-security-admission-baseline-applied 
-C-0108,ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive -C-0165,if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root -C-0066,secret-etcd-encryption-cloud -C-0066,etcd-encryption-native -C-0139,ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate -C-0197,pod-security-admission-restricted-applied -C-0005,insecure-port-flag -C-0120,ensure-that-the-api-server-authorization-mode-argument-includes-RBAC -C-0237,has-image-signature -C-0068,psp-enabled-cloud -C-0068,psp-enabled-native -C-0236,verify-image-signature -C-0216,psp-deny-hostnetwork -C-0084,exposed-rce-pods -C-0158,etcd-peer-auto-tls-disabled -C-0018,configured-readiness-probe -C-0172,anonymous-requests-to-kubelet-service-updated -C-0121,ensure-that-the-admission-control-plugin-EventRateLimit-is-set -C-0154,etcd-client-auth-cert -C-0094,ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive -C-0128,ensure-that-the-api-server-secure-port-argument-is-not-set-to-0 -C-0131,ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate -C-0260,ensure_network_policy_configured_in_labels -C-0218,psp-deny-root-container -C-0231,ensure-https-loadbalancers-encrypted-with-tls-aws -C-0110,ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root -C-0074,containers-mounting-docker-socket -C-0168,ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive -C-0146,ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true -C-0252,ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled -C-0069,anonymous-requests-to-kubelet-service-updated -C-0262,ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false -C-0262,anonymous-access-enabled -C-0035,rule-list-all-cluster-admins -C-0035,rule-list-all-cluster-admins-v1 -C-0208,external-secret-storage -C-0021,exposed-sensitive-interfaces -C-0021,exposed-sensitive-interfaces-v1 -C-0075,image-pull-policy-is-not-set-to-always -C-0217,psp-deny-allowprivilegeescalation -C-0149,ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true -C-0125,ensure-that-the-admission-control-plugin-ServiceAccount-is-set -C-0191,rule-can-bind-escalate -C-0191,rule-can-impersonate-users-groups-v1 -C-0202,pod-security-admission-baseline-applied -C-0185,cluster-admin-role -C-0162,ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive -C-0206,internal-networking -C-0076,label-usage-for-resources -C-0111,ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive -C-0177,kubelet-protect-kernel-defaults -C-0207,rule-secrets-in-env-var -C-0004,resources-memory-limit-and-request -C-0089,CVE-2022-3172 -C-0255,workload-mounted-secrets -C-0046,insecure-capabilities -C-0242,rule-hostile-multitenant-workloads -C-0259,rule-credentials-in-env-var -C-0112,ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600 -C-0230,ensure-network-policy-is-enabled-eks -C-0240,rule-cni-enabled-aks -C-0136,ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate -C-0081,CVE-2022-24348 -C-0137,ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate -C-0160,k8s-audit-logs-enabled-native-cis -C-0096,ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive 
-C-0145,ensure-that-the-controller-manager-profiling-argument-is-set-to-false -C-0156,etcd-peer-tls-enabled -C-0119,ensure-that-the-api-server-authorization-mode-argument-includes-Node -C-0182,kubelet-rotate-certificates -C-0253,rule-identify-old-k8s-registry -C-0178,kubelet-ip-tables -C-0227,ensure-endpointprivateaccess-is-enabled -C-0030,ingress-and-egress-blocked -C-0097,ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root -C-0007,rule-excessive-delete-rights -C-0007,rule-excessive-delete-rights-v1 -C-0061,pods-in-default-namespace -C-0041,host-network-access -C-0159,etcd-unique-ca -C-0130,ensure-that-the-api-server-audit-log-path-argument-is-set -C-0048,alert-any-hostpath -C-0057,rule-privilege-escalation -C-0211,rule-privilege-escalation -C-0211,immutable-container-filesystem -C-0211,non-root-containers -C-0211,drop-capability-netraw -C-0211,set-seLinuxOptions -C-0211,set-seccomp-profile -C-0211,set-procmount-default -C-0211,set-fsgroup-value -C-0211,set-fsgroupchangepolicy-value -C-0211,set-systctls-params -C-0211,set-supplementalgroups-values -C-0228,ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks -C-0220,psp-required-drop-capabilities -C-0167,ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root -C-0056,configured-liveness-probe -C-0209,list-all-namespaces -C-0095,ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root -C-0122,ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set -C-0107,ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root -C-0098,ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive -C-0196,pod-security-admission-baseline-applied -C-0087,CVE-2022-23648 -C-0175,read-only-port-enabled-updated -C-0070,enforce-kubelet-client-tls-authentication-updated -C-0184,kubelet-strong-cryptographics-ciphers -C-0067,k8s-audit-logs-enabled-cloud -C-0067,k8s-audit-logs-enabled-native -C-0166,ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive -C-0109,ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root -C-0088,rbac-enabled-cloud -C-0088,rbac-enabled-native -C-0106,ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive -C-0039,list-all-mutating-webhooks -C-0201,pod-security-admission-restricted-applied -C-0126,ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set -C-0026,rule-deny-cronjobs -C-0155,etcd-auto-tls-disabled -C-0092,ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive -C-0257,workload-mounted-pvc -C-0223,ensure_nodeinstancerole_has_right_permissions_for_ecr -C-0238,Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive -C-0232,review-roles-with-aws-iam-authenticator -C-0234,ensure-external-secrets-storage-is-in-use -C-0179,kubelet-hostname-override -C-0219,psp-deny-allowed-capabilities diff --git a/releaseDev/FWName_CID_CName.csv b/releaseDev/FWName_CID_CName.csv deleted file mode 100644 index 8acebaa40..000000000 --- a/releaseDev/FWName_CID_CName.csv +++ /dev/null @@ -1,408 +0,0 @@ -frameworkName,ControlID,ControlName -AllControls,C-0001,Forbidden Container Registries -AllControls,C-0002,Exec into container -AllControls,C-0004,Resources memory limit and request -AllControls,C-0005,API server insecure port is enabled -AllControls,C-0007,Data Destruction -AllControls,C-0009,Resource limits -AllControls,C-0012,Applications credentials 
in configuration files -AllControls,C-0013,Non-root containers -AllControls,C-0014,Access Kubernetes dashboard -AllControls,C-0015,List Kubernetes secrets -AllControls,C-0016,Allow privilege escalation -AllControls,C-0017,Immutable container filesystem -AllControls,C-0018,Configured readiness probe -AllControls,C-0020,Mount service principal -AllControls,C-0021,Exposed sensitive interfaces -AllControls,C-0026,Kubernetes CronJob -AllControls,C-0030,Ingress and Egress blocked -AllControls,C-0031,Delete Kubernetes events -AllControls,C-0034,Automatic mapping of service account -AllControls,C-0035,Cluster-admin binding -AllControls,C-0036,Malicious admission controller (validating) -AllControls,C-0038,Host PID/IPC privileges -AllControls,C-0039,Malicious admission controller (mutating) -AllControls,C-0041,HostNetwork access -AllControls,C-0042,SSH server running inside container -AllControls,C-0044,Container hostPort -AllControls,C-0045,Writable hostPath mount -AllControls,C-0046,Insecure capabilities -AllControls,C-0048,HostPath mount -AllControls,C-0049,Network mapping -AllControls,C-0050,Resources CPU limit and request -AllControls,C-0052,Instance Metadata API -AllControls,C-0053,Access container service account -AllControls,C-0054,Cluster internal networking -AllControls,C-0055,Linux hardening -AllControls,C-0056,Configured liveness probe -AllControls,C-0057,Privileged container -AllControls,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. -AllControls,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability -AllControls,C-0061,Pods in default namespace -AllControls,C-0062,Sudo in container entrypoint -AllControls,C-0063,Portforwarding privileges -AllControls,C-0065,No impersonation -AllControls,C-0066,Secret/ETCD encryption enabled -AllControls,C-0067,Audit logs enabled -AllControls,C-0068,PSP enabled -AllControls,C-0069,Disable anonymous access to Kubelet service -AllControls,C-0070,Enforce Kubelet client TLS authentication -AllControls,C-0073,Naked PODs -AllControls,C-0074,Containers mounting Docker socket -AllControls,C-0075,Image pull policy on latest tag -AllControls,C-0076,Label usage for resources -AllControls,C-0077,K8s common labels usage -AllControls,C-0078,Images from allowed registry -AllControls,C-0079,CVE-2022-0185-linux-kernel-container-escape -AllControls,C-0081,CVE-2022-24348-argocddirtraversal -AllControls,C-0086,CVE-2022-0492-cgroups-container-escape -AllControls,C-0087,CVE-2022-23648-containerd-fs-escape -AllControls,C-0088,RBAC enabled -AllControls,C-0090,CVE-2022-39328-grafana-auth-bypass -AllControls,C-0091,CVE-2022-47633-kyverno-signature-bypass -AllControls,C-0262,Anonymous access enabled -MITRE,C-0002,Exec into container -MITRE,C-0007,Data Destruction -MITRE,C-0012,Applications credentials in configuration files -MITRE,C-0014,Access Kubernetes dashboard -MITRE,C-0015,List Kubernetes secrets -MITRE,C-0020,Mount service principal -MITRE,C-0021,Exposed sensitive interfaces -MITRE,C-0026,Kubernetes CronJob -MITRE,C-0031,Delete Kubernetes events -MITRE,C-0035,Cluster-admin binding -MITRE,C-0036,Malicious admission controller (validating) -MITRE,C-0037,CoreDNS poisoning -MITRE,C-0039,Malicious admission controller (mutating) -MITRE,C-0042,SSH server running inside container -MITRE,C-0045,Writable hostPath mount -MITRE,C-0048,HostPath mount -MITRE,C-0052,Instance Metadata API -MITRE,C-0053,Access container service account -MITRE,C-0054,Cluster internal networking -MITRE,C-0057,Privileged container -MITRE,C-0058,CVE-2021-25741 - 
Using symlink for arbitrary host file system access. -MITRE,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability -MITRE,C-0066,Secret/ETCD encryption enabled -MITRE,C-0067,Audit logs enabled -MITRE,C-0068,PSP enabled -MITRE,C-0069,Disable anonymous access to Kubelet service -MITRE,C-0070,Enforce Kubelet client TLS authentication -cis-aks-t1.2.0,C-0078,Images from allowed registry -cis-aks-t1.2.0,C-0088,RBAC enabled -cis-aks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root -cis-aks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root -cis-aks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false -cis-aks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow -cis-aks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate -cis-aks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0 -cis-aks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0 -cis-aks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true -cis-aks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true -cis-aks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set -cis-aks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture -cis-aks-t1.2.0,C-0182,Ensure that the --rotate-certificates argument is not set to false -cis-aks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true -cis-aks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required -cis-aks-t1.2.0,C-0186,Minimize access to secrets -cis-aks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles -cis-aks-t1.2.0,C-0188,Minimize access to create pods -cis-aks-t1.2.0,C-0189,Ensure that default service accounts are not actively used -cis-aks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary -cis-aks-t1.2.0,C-0201,Minimize the admission of containers with capabilities assigned -cis-aks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies -cis-aks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined -cis-aks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables -cis-aks-t1.2.0,C-0208,Consider external secret storage -cis-aks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces -cis-aks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers -cis-aks-t1.2.0,C-0212,The default namespace should not be used -cis-aks-t1.2.0,C-0213,Minimize the admission of privileged containers -cis-aks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace -cis-aks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace -cis-aks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace -cis-aks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation -cis-aks-t1.2.0,C-0218,Minimize the admission of root containers -cis-aks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities -cis-aks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive -cis-aks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more 
restrictive -cis-aks-t1.2.0,C-0239,Prefer using dedicated AKS Service Accounts -cis-aks-t1.2.0,C-0240,Ensure Network Policy is Enabled and set as appropriate -cis-aks-t1.2.0,C-0241,Use Azure RBAC for Kubernetes Authorization. -cis-aks-t1.2.0,C-0242,Hostile multi-tenant workloads -cis-aks-t1.2.0,C-0243,Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider -cis-aks-t1.2.0,C-0244,Ensure Kubernetes Secrets are encrypted -cis-aks-t1.2.0,C-0245,Encrypt traffic to HTTPS load balancers with TLS certificates -cis-aks-t1.2.0,C-0247,Restrict Access to the Control Plane Endpoint -cis-aks-t1.2.0,C-0248,Ensure clusters are created with Private Nodes -cis-aks-t1.2.0,C-0249,Restrict untrusted workloads -cis-aks-t1.2.0,C-0250,Minimize cluster access to read-only for Azure Container Registry (ACR) -cis-aks-t1.2.0,C-0251,Minimize user access to Azure Container Registry (ACR) -cis-aks-t1.2.0,C-0252,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled -cis-aks-t1.2.0,C-0254,Enable audit Logs -NSA,C-0002,Exec into container -NSA,C-0005,API server insecure port is enabled -NSA,C-0009,Resource limits -NSA,C-0012,Applications credentials in configuration files -NSA,C-0013,Non-root containers -NSA,C-0016,Allow privilege escalation -NSA,C-0017,Immutable container filesystem -NSA,C-0030,Ingress and Egress blocked -NSA,C-0034,Automatic mapping of service account -NSA,C-0035,Cluster-admin binding -NSA,C-0038,Host PID/IPC privileges -NSA,C-0041,HostNetwork access -NSA,C-0044,Container hostPort -NSA,C-0046,Insecure capabilities -NSA,C-0054,Cluster internal networking -NSA,C-0055,Linux hardening -NSA,C-0057,Privileged container -NSA,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. -NSA,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability -NSA,C-0066,Secret/ETCD encryption enabled -NSA,C-0067,Audit logs enabled -NSA,C-0068,PSP enabled -NSA,C-0069,Disable anonymous access to Kubelet service -NSA,C-0070,Enforce Kubelet client TLS authentication -ArmoBest,C-0001,Forbidden Container Registries -ArmoBest,C-0002,Exec into container -ArmoBest,C-0005,API server insecure port is enabled -ArmoBest,C-0009,Resource limits -ArmoBest,C-0012,Applications credentials in configuration files -ArmoBest,C-0013,Non-root containers -ArmoBest,C-0016,Allow privilege escalation -ArmoBest,C-0017,Immutable container filesystem -ArmoBest,C-0030,Ingress and Egress blocked -ArmoBest,C-0034,Automatic mapping of service account -ArmoBest,C-0035,Cluster-admin binding -ArmoBest,C-0038,Host PID/IPC privileges -ArmoBest,C-0041,HostNetwork access -ArmoBest,C-0044,Container hostPort -ArmoBest,C-0046,Insecure capabilities -ArmoBest,C-0049,Network mapping -ArmoBest,C-0054,Cluster internal networking -ArmoBest,C-0055,Linux hardening -ArmoBest,C-0057,Privileged container -ArmoBest,C-0058,CVE-2021-25741 - Using symlink for arbitrary host file system access. 
-ArmoBest,C-0059,CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability
-ArmoBest,C-0061,Pods in default namespace
-ArmoBest,C-0062,Sudo in container entrypoint
-ArmoBest,C-0063,Portforwarding privileges
-ArmoBest,C-0065,No impersonation
-ArmoBest,C-0066,Secret/ETCD encryption enabled
-ArmoBest,C-0067,Audit logs enabled
-ArmoBest,C-0068,PSP enabled
-ArmoBest,C-0069,Disable anonymous access to Kubelet service
-ArmoBest,C-0070,Enforce Kubelet client TLS authentication
-ArmoBest,C-0078,Images from allowed registry
-ArmoBest,C-0079,CVE-2022-0185-linux-kernel-container-escape
-ArmoBest,C-0081,CVE-2022-24348-argocddirtraversal
-ArmoBest,C-0086,CVE-2022-0492-cgroups-container-escape
-ArmoBest,C-0087,CVE-2022-23648-containerd-fs-escape
-ArmoBest,C-0089,CVE-2022-3172-aggregated-API-server-redirect
-ArmoBest,C-0091,CVE-2022-47633-kyverno-signature-bypass
-ArmoBest,C-0236,Verify image signature
-ArmoBest,C-0237,Check if signature exists
-cis-v1.23-t1.0.1,C-0092,Ensure that the API server pod specification file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0093,Ensure that the API server pod specification file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0094,Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0095,Ensure that the controller manager pod specification file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0096,Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0097,Ensure that the scheduler pod specification file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0098,Ensure that the etcd pod specification file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0099,Ensure that the etcd pod specification file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0100,Ensure that the Container Network Interface file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0101,Ensure that the Container Network Interface file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0102,Ensure that the etcd data directory permissions are set to 700 or more restrictive
-cis-v1.23-t1.0.1,C-0103,Ensure that the etcd data directory ownership is set to etcd:etcd
-cis-v1.23-t1.0.1,C-0104,Ensure that the admin.conf file permissions are set to 600
-cis-v1.23-t1.0.1,C-0105,Ensure that the admin.conf file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0106,Ensure that the scheduler.conf file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0107,Ensure that the scheduler.conf file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0108,Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0109,Ensure that the controller-manager.conf file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0110,Ensure that the Kubernetes PKI directory and file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0111,Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0112,Ensure that the Kubernetes PKI key file permissions are set to 600
-cis-v1.23-t1.0.1,C-0113,Ensure that the API Server --anonymous-auth argument is set to false
-cis-v1.23-t1.0.1,C-0114,Ensure that the API Server --token-auth-file parameter is not set
-cis-v1.23-t1.0.1,C-0115,Ensure that the API Server --DenyServiceExternalIPs is not set
-cis-v1.23-t1.0.1,C-0116,Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate
-cis-v1.23-t1.0.1,C-0117,Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate
-cis-v1.23-t1.0.1,C-0118,Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow
-cis-v1.23-t1.0.1,C-0119,Ensure that the API Server --authorization-mode argument includes Node
-cis-v1.23-t1.0.1,C-0120,Ensure that the API Server --authorization-mode argument includes RBAC
-cis-v1.23-t1.0.1,C-0121,Ensure that the admission control plugin EventRateLimit is set
-cis-v1.23-t1.0.1,C-0122,Ensure that the admission control plugin AlwaysAdmit is not set
-cis-v1.23-t1.0.1,C-0123,Ensure that the admission control plugin AlwaysPullImages is set
-cis-v1.23-t1.0.1,C-0124,Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used
-cis-v1.23-t1.0.1,C-0125,Ensure that the admission control plugin ServiceAccount is set
-cis-v1.23-t1.0.1,C-0126,Ensure that the admission control plugin NamespaceLifecycle is set
-cis-v1.23-t1.0.1,C-0127,Ensure that the admission control plugin NodeRestriction is set
-cis-v1.23-t1.0.1,C-0128,Ensure that the API Server --secure-port argument is not set to 0
-cis-v1.23-t1.0.1,C-0129,Ensure that the API Server --profiling argument is set to false
-cis-v1.23-t1.0.1,C-0130,Ensure that the API Server --audit-log-path argument is set
-cis-v1.23-t1.0.1,C-0131,Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate
-cis-v1.23-t1.0.1,C-0132,Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate
-cis-v1.23-t1.0.1,C-0133,Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate
-cis-v1.23-t1.0.1,C-0134,Ensure that the API Server --request-timeout argument is set as appropriate
-cis-v1.23-t1.0.1,C-0135,Ensure that the API Server --service-account-lookup argument is set to true
-cis-v1.23-t1.0.1,C-0136,Ensure that the API Server --service-account-key-file argument is set as appropriate
-cis-v1.23-t1.0.1,C-0137,Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate
-cis-v1.23-t1.0.1,C-0138,Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate
-cis-v1.23-t1.0.1,C-0139,Ensure that the API Server --client-ca-file argument is set as appropriate
-cis-v1.23-t1.0.1,C-0140,Ensure that the API Server --etcd-cafile argument is set as appropriate
-cis-v1.23-t1.0.1,C-0141,Ensure that the API Server --encryption-provider-config argument is set as appropriate
-cis-v1.23-t1.0.1,C-0142,Ensure that encryption providers are appropriately configured
-cis-v1.23-t1.0.1,C-0143,Ensure that the API Server only makes use of Strong Cryptographic Ciphers
-cis-v1.23-t1.0.1,C-0144,Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate
-cis-v1.23-t1.0.1,C-0145,Ensure that the Controller Manager --profiling argument is set to false
-cis-v1.23-t1.0.1,C-0146,Ensure that the Controller Manager --use-service-account-credentials argument is set to true
-cis-v1.23-t1.0.1,C-0147,Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate
-cis-v1.23-t1.0.1,C-0148,Ensure that the Controller Manager --root-ca-file argument is set as appropriate
-cis-v1.23-t1.0.1,C-0149,Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true
-cis-v1.23-t1.0.1,C-0150,Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1
-cis-v1.23-t1.0.1,C-0151,Ensure that the Scheduler --profiling argument is set to false
-cis-v1.23-t1.0.1,C-0152,Ensure that the Scheduler --bind-address argument is set to 127.0.0.1
-cis-v1.23-t1.0.1,C-0153,Ensure that the --cert-file and --key-file arguments are set as appropriate
-cis-v1.23-t1.0.1,C-0154,Ensure that the --client-cert-auth argument is set to true
-cis-v1.23-t1.0.1,C-0155,Ensure that the --auto-tls argument is not set to true
-cis-v1.23-t1.0.1,C-0156,Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate
-cis-v1.23-t1.0.1,C-0157,Ensure that the --peer-client-cert-auth argument is set to true
-cis-v1.23-t1.0.1,C-0158,Ensure that the --peer-auto-tls argument is not set to true
-cis-v1.23-t1.0.1,C-0159,Ensure that a unique Certificate Authority is used for etcd
-cis-v1.23-t1.0.1,C-0160,Ensure that a minimal audit policy is created
-cis-v1.23-t1.0.1,C-0161,Ensure that the audit policy covers key security concerns
-cis-v1.23-t1.0.1,C-0162,Ensure that the kubelet service file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0163,Ensure that the kubelet service file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0164,If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0165,If proxy kubeconfig file exists ensure ownership is set to root:root
-cis-v1.23-t1.0.1,C-0166,Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0168,Ensure that the certificate authorities file permissions are set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0169,Ensure that the client certificate authorities file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0170,If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive
-cis-v1.23-t1.0.1,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
-cis-v1.23-t1.0.1,C-0172,Ensure that the --anonymous-auth argument is set to false
-cis-v1.23-t1.0.1,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow
-cis-v1.23-t1.0.1,C-0174,Ensure that the --client-ca-file argument is set as appropriate
-cis-v1.23-t1.0.1,C-0175,Verify that the --read-only-port argument is set to 0
-cis-v1.23-t1.0.1,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0
-cis-v1.23-t1.0.1,C-0177,Ensure that the --protect-kernel-defaults argument is set to true
-cis-v1.23-t1.0.1,C-0178,Ensure that the --make-iptables-util-chains argument is set to true
-cis-v1.23-t1.0.1,C-0179,Ensure that the --hostname-override argument is not set
-cis-v1.23-t1.0.1,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
-cis-v1.23-t1.0.1,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
-cis-v1.23-t1.0.1,C-0182,Ensure that the --rotate-certificates argument is not set to false
-cis-v1.23-t1.0.1,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true
-cis-v1.23-t1.0.1,C-0184,Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers
-cis-v1.23-t1.0.1,C-0185,Ensure that the cluster-admin role is only used where required
-cis-v1.23-t1.0.1,C-0186,Minimize access to secrets
-cis-v1.23-t1.0.1,C-0187,Minimize wildcard use in Roles and ClusterRoles
-cis-v1.23-t1.0.1,C-0188,Minimize access to create pods
-cis-v1.23-t1.0.1,C-0189,Ensure that default service accounts are not actively used
-cis-v1.23-t1.0.1,C-0190,Ensure that Service Account Tokens are only mounted where necessary
-cis-v1.23-t1.0.1,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster"
-cis-v1.23-t1.0.1,C-0192,Ensure that the cluster has at least one active policy control mechanism in place
-cis-v1.23-t1.0.1,C-0193,Minimize the admission of privileged containers
-cis-v1.23-t1.0.1,C-0194,Minimize the admission of containers wishing to share the host process ID namespace
-cis-v1.23-t1.0.1,C-0195,Minimize the admission of containers wishing to share the host IPC namespace
-cis-v1.23-t1.0.1,C-0196,Minimize the admission of containers wishing to share the host network namespace
-cis-v1.23-t1.0.1,C-0197,Minimize the admission of containers with allowPrivilegeEscalation
-cis-v1.23-t1.0.1,C-0198,Minimize the admission of root containers
-cis-v1.23-t1.0.1,C-0199,Minimize the admission of containers with the NET_RAW capability
-cis-v1.23-t1.0.1,C-0200,Minimize the admission of containers with added capabilities
-cis-v1.23-t1.0.1,C-0201,Minimize the admission of containers with capabilities assigned
-cis-v1.23-t1.0.1,C-0202,Minimize the admission of Windows HostProcess Containers
-cis-v1.23-t1.0.1,C-0203,Minimize the admission of HostPath volumes
-cis-v1.23-t1.0.1,C-0204,Minimize the admission of containers which use HostPorts
-cis-v1.23-t1.0.1,C-0205,Ensure that the CNI in use supports Network Policies
-cis-v1.23-t1.0.1,C-0206,Ensure that all Namespaces have Network Policies defined
-cis-v1.23-t1.0.1,C-0207,Prefer using secrets as files over secrets as environment variables
-cis-v1.23-t1.0.1,C-0208,Consider external secret storage
-cis-v1.23-t1.0.1,C-0209,Create administrative boundaries between resources using namespaces
-cis-v1.23-t1.0.1,C-0210,Ensure that the seccomp profile is set to docker/default in your pod definitions
-cis-v1.23-t1.0.1,C-0211,Apply Security Context to Your Pods and Containers
-cis-v1.23-t1.0.1,C-0212,The default namespace should not be used
-cis-eks-t1.2.0,C-0066,Secret/ETCD encryption enabled
-cis-eks-t1.2.0,C-0067,Audit logs enabled
-cis-eks-t1.2.0,C-0078,Images from allowed registry
-cis-eks-t1.2.0,C-0167,Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
-cis-eks-t1.2.0,C-0171,If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
-cis-eks-t1.2.0,C-0172,Ensure that the --anonymous-auth argument is set to false
-cis-eks-t1.2.0,C-0173,Ensure that the --authorization-mode argument is not set to AlwaysAllow
-cis-eks-t1.2.0,C-0174,Ensure that the --client-ca-file argument is set as appropriate
-cis-eks-t1.2.0,C-0175,Verify that the --read-only-port argument is set to 0
-cis-eks-t1.2.0,C-0176,Ensure that the --streaming-connection-idle-timeout argument is not set to 0
-cis-eks-t1.2.0,C-0177,Ensure that the --protect-kernel-defaults argument is set to true
-cis-eks-t1.2.0,C-0178,Ensure that the --make-iptables-util-chains argument is set to true
-cis-eks-t1.2.0,C-0179,Ensure that the --hostname-override argument is not set
-cis-eks-t1.2.0,C-0180,Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
-cis-eks-t1.2.0,C-0181,Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
-cis-eks-t1.2.0,C-0183,Verify that the RotateKubeletServerCertificate argument is set to true
-cis-eks-t1.2.0,C-0185,Ensure that the cluster-admin role is only used where required
-cis-eks-t1.2.0,C-0186,Minimize access to secrets
-cis-eks-t1.2.0,C-0187,Minimize wildcard use in Roles and ClusterRoles
-cis-eks-t1.2.0,C-0188,Minimize access to create pods
-cis-eks-t1.2.0,C-0189,Ensure that default service accounts are not actively used
-cis-eks-t1.2.0,C-0190,Ensure that Service Account Tokens are only mounted where necessary
-cis-eks-t1.2.0,C-0191,"Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster"
-cis-eks-t1.2.0,C-0205,Ensure that the CNI in use supports Network Policies
-cis-eks-t1.2.0,C-0206,Ensure that all Namespaces have Network Policies defined
-cis-eks-t1.2.0,C-0207,Prefer using secrets as files over secrets as environment variables
-cis-eks-t1.2.0,C-0209,Create administrative boundaries between resources using namespaces
-cis-eks-t1.2.0,C-0211,Apply Security Context to Your Pods and Containers
-cis-eks-t1.2.0,C-0212,The default namespace should not be used
-cis-eks-t1.2.0,C-0213,Minimize the admission of privileged containers
-cis-eks-t1.2.0,C-0214,Minimize the admission of containers wishing to share the host process ID namespace
-cis-eks-t1.2.0,C-0215,Minimize the admission of containers wishing to share the host IPC namespace
-cis-eks-t1.2.0,C-0216,Minimize the admission of containers wishing to share the host network namespace
-cis-eks-t1.2.0,C-0217,Minimize the admission of containers with allowPrivilegeEscalation
-cis-eks-t1.2.0,C-0218,Minimize the admission of root containers
-cis-eks-t1.2.0,C-0219,Minimize the admission of containers with added capabilities
-cis-eks-t1.2.0,C-0220,Minimize the admission of containers with capabilities assigned
-cis-eks-t1.2.0,C-0221,Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider
-cis-eks-t1.2.0,C-0222,Minimize user access to Amazon ECR
-cis-eks-t1.2.0,C-0223,Minimize cluster access to read-only for Amazon ECR
-cis-eks-t1.2.0,C-0225,Prefer using dedicated EKS Service Accounts
-cis-eks-t1.2.0,C-0226,Prefer using a container-optimized OS when possible
-cis-eks-t1.2.0,C-0227,Restrict Access to the Control Plane Endpoint
-cis-eks-t1.2.0,C-0228,Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled
-cis-eks-t1.2.0,C-0229,Ensure clusters are created with Private Nodes
-cis-eks-t1.2.0,C-0230,Ensure Network Policy is Enabled and set as appropriate
-cis-eks-t1.2.0,C-0231,Encrypt traffic to HTTPS load balancers with TLS certificates
-cis-eks-t1.2.0,C-0232,Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156
-cis-eks-t1.2.0,C-0233,Consider Fargate for running untrusted workloads
-cis-eks-t1.2.0,C-0234,Consider external secret storage
-cis-eks-t1.2.0,C-0235,Ensure that the kubelet configuration file has permissions set to 644 or more restrictive
-cis-eks-t1.2.0,C-0238,Ensure that the kubeconfig file permissions are set to 644 or more restrictive
-cis-eks-t1.2.0,C-0242,Hostile multi-tenant workloads
-cis-eks-t1.2.0,C-0246,Avoid use of system:masters group
-DevOpsBest,C-0004,Resources memory limit and request
-DevOpsBest,C-0018,Configured readiness probe
-DevOpsBest,C-0044,Container hostPort
-DevOpsBest,C-0050,Resources CPU limit and request
-DevOpsBest,C-0056,Configured liveness probe
-DevOpsBest,C-0061,Pods in default namespace
-DevOpsBest,C-0073,Naked PODs
-DevOpsBest,C-0074,Containers mounting Docker socket
-DevOpsBest,C-0075,Image pull policy on latest tag
-DevOpsBest,C-0076,Label usage for resources -DevOpsBest,C-0077,K8s common labels usage -DevOpsBest,C-0253,Deprecated Kubernetes image registry -security,C-0009,Resource limits -security,C-0017,Immutable container filesystem -security,C-0256,Exposure to internet -security,C-0259,Workload with credential access -security,C-0258,Workload with ConfigMap access -security,C-0257,Workload with PVC access -security,C-0260,Missing network policy -security,C-0261,ServiceAccount token mounted -security,C-0255,Workload with secret access -security,C-0041,HostNetwork access -security,C-0044,Container hostPort -security,C-0045,Writable hostPath mount -security,C-0046,Insecure capabilities -security,C-0048,HostPath mount -security,C-0211,Apply Security Context to Your Pods and Containers -security,C-0262,Anonymous access enabled diff --git a/releaseDev/allcontrols.json b/releaseDev/allcontrols.json deleted file mode 100644 index a98b5e7fe..000000000 --- a/releaseDev/allcontrols.json +++ /dev/null @@ -1,5496 +0,0 @@ -{ - "name": "AllControls", - "description": "Contains all the controls from all the frameworks", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Forbidden Container Registries", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster\u2019s management layer.", - "remediation": "Limit the registries from which you pull container images from", - "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. 
Building images based on untrusted base images can also lead to similar results.", - "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", - "controlID": "C-0001", - "baseScore": 7.0, - "example": "@controls/examples/c001.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-identify-blocklisted-image-registries", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.publicRegistries", - "name": "Public registries", - "description": "Kubescape checks none of these public registries are in use." - }, - { - "path": "settings.postureControlInputs.untrustedRegistries", - "name": "Registries block list", - "description": "Kubescape checks none of the following registries are in use." - } - ], - "description": "Identifying if pod container images are from unallowed registries", - "remediation": "Use images from safe registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}" - } - ] - }, - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" - }, - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport 
future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Resources memory limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0004", - "example": "@controls/examples/c004.yaml", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limit-and-request", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.memory_request_max", - "settings.postureControlInputs.memory_request_min", - "settings.postureControlInputs.memory_limit_max", - "settings.postureControlInputs.memory_limit_min" - ], - "controlConfigInputs": [ - { - 
"path": "settings.postureControlInputs.memory_request_max", - "name": "memory_request_max", - "description": "Ensure memory max requests are set" - }, - { - "path": "settings.postureControlInputs.memory_request_min", - "name": "memory_request_min", - "description": "Ensure memory min requests are set" - }, - { - "path": "settings.postureControlInputs.memory_limit_max", - "name": "memory_limit_max", - "description": "Ensure memory max limits are set" - }, - { - "path": "settings.postureControlInputs.memory_limit_min", - "name": "memory_limit_min", - "description": "Ensure memory min limits are set" - } - ], - "description": "memory limits and requests are not set.", - "remediation": "Ensure memory limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not have container with memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nrequest_or_limit_memory(container) 
{\n\tcontainer.resources.limits.memory\n\tcontainer.resources.requests.memory\n}\n\n######################################################################################################\n\n# Fails if pod exceeds memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\nis_min_max_exceeded_memory(container) = \"resources.limits.memory\" {\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n} else = \"resouces.requests.memory\" {\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n} else = \"\" {\n\ttrue\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max :=data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, 
memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - } - ] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Data Destruction", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. 
This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-excessive-delete-rights", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can can delete important resources\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga 
:= {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n" - }, - { - "name": "rule-excessive-delete-rights-v1", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", 
[rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resource-policies", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace has no resource policies defined", - "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the Pods running as root or that can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id greater than 999, or the runAsNonRoot flag is set to true. 
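A minimal sketch of a Pod that passes the non-root checks described above, assuming the application can run with a non-zero UID (name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: nonroot-demo               # hypothetical name
spec:
  securityContext:
    runAsNonRoot: true             # pod-level value used when the container does not override it
    runAsUser: 1000                # non-zero UID, so the runAsUser branch of the rule passes
  containers:
  - name: app
    image: nginx                   # placeholder image
    securityContext:
      allowPrivilegeEscalation: false   # also required, since the rule falls back to this check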
Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - } - ] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
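Following the least-privilege remediation for the dashboard control, a sketch of a binding that the rules below would not flag, because the only subject is the dashboard's own service account (the namespace and names follow the common upstream layout and are illustrative):

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount             # only the dashboard's own service account is bound,
  name: kubernetes-dashboard       # so no foreign subject is reported
  namespace: kubernetes-dashboard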
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-access-dashboard", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n 
wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - }, - { - "name": "rule-access-dashboard-subject-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" - }, - { - "name": "rule-access-dashboard-wl-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ 
- "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. 
Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can list/get secrets\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n 
clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service 
accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
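A minimal sketch of a container that satisfies the allow-privilege-escalation control, assuming the application does not need privilege escalation (name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: no-privesc-demo            # hypothetical name
spec:
  containers:
  - name: app
    image: nginx                   # placeholder image
    securityContext:
      allowPrivilegeEscalation: false   # explicit false; leaving the field unset is also reported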
7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. 
Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := 
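A sketch of the remediation for the immutable-filesystem control: a read-only root filesystem with a separate writable mount for the paths the application actually needs (the name, image and mount path are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: readonly-fs-demo           # hypothetical name
spec:
  containers:
  - name: app
    image: nginx                   # placeholder image
    securityContext:
      readOnlyRootFilesystem: true # the field this control checks
    volumeMounts:
    - name: scratch
      mountPath: /tmp              # writable scratch space mounted separately
  volumes:
  - name: scratch
    emptyDir: {}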
get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - } - ] - }, - { - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
This control finds all the PODs where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-readiness-probe", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Readiness probe is not configured", - "remediation": "Ensure Readiness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Mount service principal", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
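A minimal sketch of a container with a readiness probe configured, which is all the readiness-probe rule above looks for (the name, image, port and timings are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: readiness-demo             # hypothetical name
spec:
  containers:
  - name: app
    image: nginx                   # placeholder image
    ports:
    - containerPort: 80
    readinessProbe:                # presence of this block satisfies the check
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10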
This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-mount-potential-credentials-paths", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "relevantCloudProviders": [ - "EKS", - "GKE", - "AKS" - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider 
(eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" - } - ] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. 
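As a sketch of the remediation for this control, a sensitive interface can be kept on an internal ClusterIP Service, since the rules below only alert on NodePort and LoadBalancer services whose names appear in the configured list (the service name and port here are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: argo-server                # illustrative name of a potentially sensitive interface
spec:
  type: ClusterIP                  # internal only; the rule alerts only on NodePort/LoadBalancer
  selector:
    app: argo-server
  ports:
  - port: 2746
    targetPort: 2746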
User configuration is required.", - "controlID": "C-0021", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "exposed-sensitive-interfaces", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.servicesNames" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.servicesNames", - "name": "Service names", - "description": "Kubescape will look for the following services that expose sensitive interfaces of common K8s projects/applications" 
wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n" - }, - { - "name": "exposed-sensitive-interfaces-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.sensitiveInterfaces" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveInterfaces", - "name": "Sensitive interfaces", - "description": "The following interfaces were seen exploited. Kubescape checks it they are externally exposed." 
- } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == 
wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" - } - ] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in the cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rule-deny-cronjobs", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", - "armoBuiltin": true - }, - "ruleLanguage": "rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if the resource is a CronJob", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n    msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n        \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n    }\n}\n" - } - ] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object. Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ingress-and-egress-blocked", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, 
networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" - } - ] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-delete-k8s-events", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := 
sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}" - }, - { - "name": "rule-can-delete-k8s-events-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ 
groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v 
mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod 
spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, 
such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Malicious admission controller (validating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ 
- "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Malicious admission controller", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns validating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n    admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n    admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC privileges from the yaml file(s) unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
This control identifies all PODs using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": 
[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n    msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n    podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n    podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "Malicious admission controller (mutating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-mutating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Malicious admission controller", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n    mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n    mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in the AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to the host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "SSH server running inside container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "long_description": "SSH server that is running inside a container may be used by attackers. 
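[Editorial illustration] A hedged sketch of the C-0041 remediation just above: the host-network-access rule's is_host_network helper only matches when hostNetwork == true, so setting it to false (or omitting it) keeps the workload off the host network. All names are illustrative assumptions.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: web                   # hypothetical name
spec:
  replicas: 1
  selector:
    matchLabels: {app: web}
  template:
    metadata:
      labels: {app: web}
    spec:
      hostNetwork: false      # or omit entirely; only "true" triggers the alert
      containers:
      - name: web
        image: nginx:1.25     # placeholder image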
If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-ssh-to-pod", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := 
service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n" - }, - { - "name": "rule-can-ssh-to-pod-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH 
services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": 
[ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": 
path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := 
is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." 
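[Editorial illustration] A sketch of the fix the alert-rw-hostpath rule (C-0045) points to with its fixPath: setting readOnly: true on the hostPath volumeMount. Paths and names are hypothetical; note that a read-only hostPath still triggers the separate C-0048 hostPath control below.

apiVersion: v1
kind: Pod
metadata:
  name: log-reader            # hypothetical name
spec:
  containers:
  - name: reader
    image: busybox:1.36       # placeholder image
    volumeMounts:
    - name: host-logs
      mountPath: /host/logs
      readOnly: true          # the field the rule's fixPath targets
  volumes:
  - name: host-logs
    hostPath:
      path: /var/log          # illustrative host path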
- } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
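[Editorial illustration] For C-0046 above, a hedged example of a securityContext that avoids adding capabilities from the configurable insecureCapabilities list. The insecure-capabilities rule inspects only securityContext.capabilities.add, so dropping everything and adding back a minimal set passes, assuming the added capability (NET_BIND_SERVICE here) is not on the configured list. Names and image are placeholders.

apiVersion: v1
kind: Pod
metadata:
  name: minimal-caps          # hypothetical name
spec:
  containers:
  - name: app
    image: example/app:1.0    # placeholder image
    securityContext:
      capabilities:
        drop: ["ALL"]
        add: ["NET_BIND_SERVICE"]   # add only what the workload needs; avoid entries such as SYS_ADMIN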
This control identifies all the PODs using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n 
startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Resources CPU limit and request", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control 
identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0050", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-limit-and-request", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.cpu_request_max", - "settings.postureControlInputs.cpu_request_min", - "settings.postureControlInputs.cpu_limit_min", - "settings.postureControlInputs.cpu_limit_max" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.cpu_request_max", - "name": "cpu_request_max", - "description": "Ensure CPU max requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_request_min", - "name": "cpu_request_min", - "description": "Ensure CPU min requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_max", - "name": "cpu_limit_max", - "description": "Ensure CPU max limits are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_min", - "name": "cpu_limit_min", - "description": "Ensure CPU min limits are set" - } - ], - "description": "CPU limits and requests are not set.", - "remediation": "Ensure CPU limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not have container with CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n###################################################################################################################\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n \tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n#################################################################################################################3\n\nrequest_or_limit_cpu(container) 
{\n\tcontainer.resources.limits.cpu\n\tcontainer.resources.requests.cpu\n}\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resouces.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - } - ] - }, - { - "name": "Instance Metadata API", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
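[Editorial illustration] To make the C-0050 fixPaths concrete, a hedged container resources block with both the CPU request and the CPU limit set. The values are placeholders and should also fall inside the cpu_request_min/max and cpu_limit_min/max ranges configured for the control.

apiVersion: v1
kind: Pod
metadata:
  name: bounded-cpu           # hypothetical name
spec:
  containers:
  - name: app
    image: example/app:1.0    # placeholder image
    resources:
      requests:
        cpu: 100m             # spec.containers[0].resources.requests.cpu
      limits:
        cpu: 500m             # spec.containers[0].resources.limits.cpu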
This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "instance-metadata-api-access", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "cloudProviderInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" - } - ] - }, - { - "name": "Access container service account", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
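[Editorial illustration] For the C-0053 guidance above, a hedged sketch of disabling the service-account token mount: the rule's isNotAutoMount helper accepts automountServiceAccountToken: false on the pod spec, or on the ServiceAccount when the pod does not set the field. All names are assumptions.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: no-token-sa           # hypothetical name
automountServiceAccountToken: false
---
apiVersion: v1
kind: Pod
metadata:
  name: worker                # hypothetical name
spec:
  serviceAccountName: no-token-sa
  automountServiceAccountToken: false   # pod-level setting also satisfies the helper
  containers:
  - name: worker
    image: example/worker:1.0 # placeholder image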
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "access-container-service-account", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the 
cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role 
:= roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := 
rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n" - }, - { - "name": "access-container-service-account-v1", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", - "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. 
If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "linux-hardening", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot 
obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - } - ] - }, - { - "name": "Configured liveness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "controlID": "C-0056", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-liveness-probe", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Liveness probe is not configured", - "remediation": "Ensure Liveness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": 
sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only 
securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to 
CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the PODs running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "sudo-in-container-entrypoint", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, beggining_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster 
or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-portforward", - "attributes": { - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; 
rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"get\")\n}\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/portforward\")\n}\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/*\")\n}\ncanForwardToPodResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "rule-can-portforward-v1", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "No impersonation", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-impersonate-users-groups", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"impersonate\")\n}\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"*\")\n}\n\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"users\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"serviceaccounts\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"groups\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"uids\")\n}\n\ncanImpersonateResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, 
\"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if PODs may lead to a configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have corresponding parental object.", - "remediation": "Create necessary Deployment object for every POD making any POD a first class citizen in your IaC architecture.", - "long_description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if PODs may lead to a configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have corresponding parental object.", - "test": "Test if PODs are not associated with Deployment, ReplicaSet etc. If not, fail.", - "controlID": "C-0073", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "naked-pods", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", - "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. 
Example command: kubectl create deployment nginx-depl --image=nginx:1.19", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Containers mounting Docker socket", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", - "remediation": "Remove docker socket mount request or define an exception.", - "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", - "test": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "containers-mounting-docker-socket", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Check hostpath. 
If the path is set to /var/run/docker.sock or /var/lib/docker, the container has access to Docker internals - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n" - } - ] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache.
Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "image-pull-policy-is-not-set-to-always", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "check the imagePullPolicy field; if imagePullPolicy = always pass, else fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name,
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" - } - ] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "label-usage-for-resources", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.recommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.recommendedLabels", - "name": "Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following labels." - } - ], - "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n \n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, 
podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n" - } - ] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "K8s common labels usage", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.k8sRecommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.k8sRecommendedLabels", - "name": "Kubernetes Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following kubernetes recommended labels." 
- } - ], - "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := 
recommended_labels[_]\n\tlabels[recommended_label]\n}\n" - } - ] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - } - ] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. 
This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-0185", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "LinuxKernelVariables" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n kernel_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", node.status.nodeInfo.kernelVersion, -1)\n kernelVersion := kernel_version_match[0][0]\n \n kernel_version_arr := split(kernelVersion, \".\")\n to_number(kernel_version_arr[0]) == 5\n to_number(kernel_version_arr[1]) >= 1\n to_number(kernel_version_arr[1]) <= 16\n to_number(kernel_version_arr[2]) < 2 \n \n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": 
\"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-24348", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := 
{\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. This control identifies all the resources that don't deploy niether AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. 
In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", - "controlID": "C-0086", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-0492", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Case 1: \n# -\tContainer runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND\n# -\tNo AppArmor , AND\n# -\tNo SELinux, AND\n# -\tNo Seccomp\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\t\n\t# Path to send\n\tbeggining_of_path := \"spec\"\n\t\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec\"\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n # If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\n\tpod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n \n \t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################\n# Case 2: \n# - Container has CAP_DAC_OVERRIDE capability, AND\n# - No AppArmor, AND\n# - No SELinux\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.\"\n\t\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec.\"\n\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n \n pod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\n \tresult := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\nis_cap_sys_admin(container, beggining_of_path) {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"SYS_ADMIN\"\n}\n\nisCAP_DAC_OVERRIDE(container, beggining_of_path, i) = path {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"DAC_OVERRIDE\"\n path = sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) \n}\n\n\n\n#################################################################################\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n\n\n\n\n#################################################################################\n\n# Check if appArmor or SELinux or seccompProfile is used\n# Fails if none of them is used\nis_no_SELinux_No_AppArmor_Pod(pod){\n not pod.spec.securityContext.seLinuxOptions\n\tannotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tnot count(annotations) > 0\n}\n\nis_no_SELinux_container(container){\n not container.securityContext.seLinuxOptions\n}\n\nis_no_seccomp_pod(pod) {\n not pod.spec.securityContext.seccompProfile\n}\n\nis_no_Seccomp_Container(container) {\n not container.securityContext.seccompProfile\n}\n\n\n\n\n\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n\n# Checking for non-root and allowPrivilegeEscalation enabled\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": 
[{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.containers[container_ndx].runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", 
\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n" - } - ] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation", - "Impact - Data access in container" - ] - } - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-23648", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 
1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" - } - ] - }, - { - "name": "RBAC enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access", - "Privilege escalation" - ] - } - ] - }, - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rbac-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" - }, - { - "name": "rbac-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CVE-2022-39328-grafana-auth-bypass", - "attributes": { - "armoBuiltin": true, - 
"controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", - "remediation": "Update your Grafana to 9.2.4 or above", - "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", - "test": "This control test for vulnerable versions of Grafana (between 9.2 and 9.2.3)", - "controlID": "C-0090", - "baseScore": 9.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-39328", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a 
malicious image repository or MITM proxy", - "remediation": "Update your Grafana to 9.2.4 or above", - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was pull image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-47633", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. 
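A condensed Rego sketch of the API-server half of this control follows: the kube-apiserver Pod is flagged unless --anonymous-auth=false appears in its command. The package name and message are illustrative assumptions; the bundled rules below additionally suggest fix paths and cover RBAC bindings granted to system:anonymous or system:unauthenticated.

    package example_anonymous_auth

    deny[msg] {
    	obj := input[_]
    	obj.kind == "Pod"
    	endswith(obj.spec.containers[0].command[0], "kube-apiserver")

    	# flag unless anonymous auth is explicitly disabled
    	full_cmd := concat(" ", obj.spec.containers[0].command)
    	not contains(full_cmd, "--anonymous-auth=false")

    	msg := "kube-apiserver does not explicitly disable anonymous authentication"
    }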
Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "anonymous-access-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } 
- ], - "ruleDependencies": [], - "description": "Fails in case anonymous access is enabled on the cluster", - "remediation": "Disable anonymous access by passing the --anonymous-auth=false flag to the kube-apiserver component, or if it's a managed cluster, you can remove any RBAC rules which allow anonymous users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n\n isAnonymous(rolebinding)\n\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:anonymous\"\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:unauthenticated\"\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0001", - "C-0002", - "C-0004", - "C-0005", - "C-0007", - "C-0009", - "C-0012", - "C-0013", - "C-0014", - "C-0015", - "C-0016", - "C-0017", - "C-0018", - "C-0020", - "C-0021", - "C-0026", - "C-0030", - "C-0031", - "C-0034", - "C-0035", - "C-0036", - "C-0038", - "C-0039", - "C-0041", - "C-0042", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0049", - "C-0050", - "C-0052", - "C-0053", - "C-0054", - "C-0055", - "C-0056", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0078", - "C-0079", - "C-0081", - "C-0086", - "C-0087", - "C-0088", - "C-0090", - "C-0091", - "C-0262" - ] -} \ No newline at end of file diff --git a/releaseDev/armobest.json b/releaseDev/armobest.json deleted file mode 100644 index d3894ca24..000000000 --- a/releaseDev/armobest.json +++ /dev/null @@ -1,3358 +0,0 @@ -{ - "name": "ArmoBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Forbidden Container Registries", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster\u2019s management layer.", - "remediation": "Limit the registries from which you pull container images from", - "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. 
Building images based on untrusted base images can also lead to similar results.", - "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", - "controlID": "C-0001", - "baseScore": 7.0, - "example": "@controls/examples/c001.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-identify-blocklisted-image-registries", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.publicRegistries", - "name": "Public registries", - "description": "Kubescape checks none of these public registries are in use." - }, - { - "path": "settings.postureControlInputs.untrustedRegistries", - "name": "Registries block list", - "description": "Kubescape checks none of the following registries are in use." - } - ], - "description": "Identifying if pod container images are from unallowed registries", - "remediation": "Use images from safe registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}" - } - ] - }, - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" - }, - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport 
future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. 
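A slightly broader Rego sketch of this check is shown here for orientation: it flags an API-server container whose command sets --insecure-port to any non-zero value, whereas the rule that follows keys on the literal string "--insecure-port=1". Package name and message are illustrative assumptions.

    package example_insecure_port

    deny[msg] {
    	pod := input[_]
    	pod.kind == "Pod"
    	contains(pod.metadata.name, "kube-apiserver")

    	# any non-zero --insecure-port value exposes the unauthenticated port
    	cmd := pod.spec.containers[_].command[_]
    	regex.match(`^--insecure-port=[1-9][0-9]*$`, cmd)

    	msg := sprintf("insecure port enabled via '%v'", [cmd])
    }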
Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resource-policies", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace has no resource policies defined", - "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without cpu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using a list of known sensitive key names. 
Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the Pods running as root or that can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id greater than 999, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not 
container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - } - ] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ingress-and-egress-blocked", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, 
networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n    count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n    count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n    count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n    list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n    list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n    some i\n    list[i] == element\n}" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "A potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "long_description": "Service account tokens are mounted into PODs by default. An attacker who gains access to a POD can read the mounted token and use it to authenticate to the Kubernetes API with the permissions of the POD's service account. Disabling automatic mounting, either on the ServiceAccount or in the POD spec, limits this exposure to workloads that actually need API access.", - "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) 
{\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n 
}\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := 
sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. 
Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n 
path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." 
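As a reader aid for the insecure-capabilities control above (C-0046), here is a minimal, hypothetical pod manifest sketch — names, namespace and image are placeholders, not taken from this patch — that would pass the check, assuming none of the capabilities listed under securityContext.capabilities.add appear in the configured settings.postureControlInputs.insecureCapabilities list:

apiVersion: v1
kind: Pod
metadata:
  name: hardened-app            # hypothetical name
spec:
  containers:
  - name: app                   # hypothetical container name
    image: registry.example.com/app:1.0   # placeholder image
    securityContext:
      capabilities:
        drop: ["ALL"]                 # general hardening; not checked by this rule itself
        add: ["NET_BIND_SERVICE"]     # add back only what the workload needs;
                                      # must not match the insecureCapabilities blacklist

The Rego rule only flags entries under securityContext.capabilities.add that match the configurable blacklist, so keeping that list limited to required, non-blacklisted capabilities leaves failedPaths empty; the drop: ["ALL"] line reflects the control's remediation guidance rather than a condition the rule evaluates.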
- } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. 
Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "linux-hardening", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot 
obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := 
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - 
"compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the PODs running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
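Illustrative sketch (an assumption about the core predicate, not the shipped sudo-in-container-entrypoint rule below, which also reports failedPaths per workload kind): the check reduces to scanning every element of a container's command for the string "sudo".

    package armo_builtins

    # True if any element of the container entrypoint command mentions sudo.
    has_sudo_in_entrypoint(container) {
        some k
        contains(container.command[k], "sudo")
    }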
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "sudo-in-container-entrypoint", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, beggining_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster 
or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-portforward", - "attributes": { - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; 
rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"get\")\n}\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/portforward\")\n}\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/*\")\n}\ncanForwardToPodResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "rule-can-portforward-v1", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "No impersonation", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-impersonate-users-groups", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"impersonate\")\n}\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"*\")\n}\n\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"users\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"serviceaccounts\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"groups\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"uids\")\n}\n\ncanImpersonateResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, 
\"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
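Illustrative sketch (an assumption about the shape of the check, not the shipped psp-enabled-native rule below): for native clusters the control inspects the kube-apiserver --enable-admission-plugins flag for the PodSecurityPolicy plugin.

    package armo_builtins

    # True if some kube-apiserver argument enables the PodSecurityPolicy admission plugin.
    psp_admission_plugin_enabled(cmd) {
        some i
        contains(cmd[i], "--enable-admission-plugins=")
        contains(cmd[i], "PodSecurityPolicy")
    }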
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - } - ] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. 
This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-0185", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "LinuxKernelVariables" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n kernel_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", node.status.nodeInfo.kernelVersion, -1)\n kernelVersion := kernel_version_match[0][0]\n \n kernel_version_arr := split(kernelVersion, \".\")\n to_number(kernel_version_arr[0]) == 5\n to_number(kernel_version_arr[1]) >= 1\n to_number(kernel_version_arr[1]) <= 16\n to_number(kernel_version_arr[2]) < 2 \n \n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": 
\"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-24348", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := 
{\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. This control identifies all the resources that don't deploy niether AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. 
In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", - "controlID": "C-0086", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-0492", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Case 1: \n# -\tContainer runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND\n# -\tNo AppArmor , AND\n# -\tNo SELinux, AND\n# -\tNo Seccomp\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\t\n\t# Path to send\n\tbeggining_of_path := \"spec\"\n\t\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec\"\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n # If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\n\tpod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n \n \t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################\n# Case 2: \n# - Container has CAP_DAC_OVERRIDE capability, AND\n# - No AppArmor, AND\n# - No SELinux\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.\"\n\t\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec.\"\n\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n \n pod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\n \tresult := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\nis_cap_sys_admin(container, beggining_of_path) {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"SYS_ADMIN\"\n}\n\nisCAP_DAC_OVERRIDE(container, beggining_of_path, i) = path {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"DAC_OVERRIDE\"\n path = sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) \n}\n\n\n\n#################################################################################\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n\n\n\n\n#################################################################################\n\n# Check if appArmor or SELinux or seccompProfile is used\n# Fails if none of them is used\nis_no_SELinux_No_AppArmor_Pod(pod){\n not pod.spec.securityContext.seLinuxOptions\n\tannotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tnot count(annotations) > 0\n}\n\nis_no_SELinux_container(container){\n not container.securityContext.seLinuxOptions\n}\n\nis_no_seccomp_pod(pod) {\n not pod.spec.securityContext.seccompProfile\n}\n\nis_no_Seccomp_Container(container) {\n not container.securityContext.seccompProfile\n}\n\n\n\n\n\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n\n# Checking for non-root and allowPrivilegeEscalation enabled\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": 
[{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.containers[container_ndx].runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", 
\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n" - } - ] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation", - "Impact - Data access in container" - ] - } - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-23648", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 
1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" - } - ] - }, - { - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", - "controlID": "C-0089", - "baseScore": 3.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "CVE-2022-3172", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apiregistration.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "APIService" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "apiserverinfo.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", - "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" - } - ] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, 
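The CVE-2022-3172 rule above normalizes the reported API server gitVersion before running its semver.compare range checks, and treats an unparseable version as potentially affected. A minimal sketch of that normalization step, using only standard Rego builtins (the package and helper names are illustrative, not part of the library):

package example

# Turn a reported gitVersion such as "v1.24.3-eks-abc123" into a bare
# semver string ("1.24.3"); the rule above falls back to "" when this
# fails and then treats the version as potentially affected.
normalized_version(git_version) = v {
	v := replace(split(git_version, "-")[0], "v", "")
	semver.is_valid(v)
}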
- "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Grafana to 9.2.4 or above", - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process was pull image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "CVE-2022-47633", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - } - ] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - 
"references": [], - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "verify-image-signature", - "attributes": { - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "ruleQuery": "armo_builtins", - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.trustedCosignPublicKeys", - "name": "Trusted Cosign public keys", - "description": "Trusted Cosign public keys" - } - ], - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0237", - "name": "Check if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - 
"default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "has-image-signature", - "attributes": { - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensures that all images contain some signature", - "remediation": "Replace the image with a signed image", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0001", - "C-0002", - "C-0005", - "C-0009", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0049", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0078", - "C-0079", - "C-0081", - "C-0086", - "C-0087", - "C-0089", - "C-0091", - "C-0236", - "C-0237" - ] -} \ No newline at end of file diff --git a/releaseDev/attack_tracks.json b/releaseDev/attack_tracks.json deleted file mode 100644 index 487ad92b4..000000000 --- a/releaseDev/attack_tracks.json +++ /dev/null @@ -1,59 +0,0 @@ -[ - { - "apiVersion": "regolibrary.kubescape/v1alpha1", - "kind": "AttackTrack", - "metadata": { - "name": "workload-external-track" - }, - "spec": { - "version": null, - "data": { - "name": "Workload Exposure", - "subSteps": [ - { - "name": 
"Vulnerable Image", - "checksVulnerabilities": true, - "subSteps": [ - { - "name": "Data Access" - }, - { - "name": "Secret Access" - }, - { - "name": "Credential access" - }, - { - "name": "Potential Node exposure" - }, - { - "name": "Persistence" - }, - { - "name": "Network" - } - ] - } - ] - } - } - }, - { - "apiVersion": "regolibrary.kubescape/v1alpha1", - "kind": "AttackTrack", - "metadata": { - "name": "service-destruction" - }, - "spec": { - "version": null, - "data": { - "name": "Workload Exposure", - "subSteps": [ - { - "name": "Service Destruction" - } - ] - } - } - } -] \ No newline at end of file diff --git a/releaseDev/cis-aks-t1.2.0.json b/releaseDev/cis-aks-t1.2.0.json deleted file mode 100644 index 2f084c382..000000000 --- a/releaseDev/cis-aks-t1.2.0.json +++ /dev/null @@ -1,4072 +0,0 @@ -{ - "name": "cis-aks-t1.2.0", - "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", - "attributes": { - "version": "v1.2.0", - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Master (Control Plane) Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0254" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0175", - "C-0179", - "C-0182", - "C-0173", - "C-0174", - "C-0176", - "C-0177", - "C-0178", - "C-0180", - "C-0183" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "4.2", - "controlsIDs": [ - "C-0201", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219" - ] - }, - "3": { - "name": "Azure Policy / OPA", - "id": "4.3", - "controlsIDs": [] - }, - "4": { - "name": "CNI Plugin", - "id": "4.4", - "controlsIDs": [ - "C-0206", - "C-0205" - ] - }, - "5": { - "name": "Secrets Management", - "id": "4.5", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "6": { - "name": "Extensible Admission Control", - "id": "4.6", - "controlsIDs": [] - }, - "7": { - "name": "General Policies", - "id": "4.7", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0078", - "C-0243", - "C-0250", - "C-0251" - ] - }, - "2": { - "name": "Access and identity options for Azure Kubernetes Service (AKS)", - "id": "5.2", - "controlsIDs": [ - "C-0239", - "C-0241" - ] - }, - "3": { - "name": "Key Management Service (KMS)", - "id": "5.3", - "controlsIDs": [ - "C-0244" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0240", - "C-0245", - "C-0247", - "C-0248", - "C-0252" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0088" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0242", - "C-0249" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.1.4 
Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Use approved container registries.", - "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
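The container-image-repository rule body that follows repeats the same check three times because the container list sits at a different path for Pods, for template-based workloads (Deployment, ReplicaSet, DaemonSet, StatefulSet, Job), and for CronJobs. A sketch of those three container paths, with an illustrative helper that is not part of the rule:

package example

# Containers live at a different path depending on the workload kind,
# which is why the rule defines one deny body per shape.
containers(obj) = cs {
	obj.kind == "Pod"
	cs := obj.spec.containers
}

containers(obj) = cs {
	template_kinds := {"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job"}
	template_kinds[obj.kind]
	cs := obj.spec.template.spec.containers
}

containers(obj) = cs {
	obj.kind == "CronJob"
	cs := obj.spec.jobTemplate.spec.template.spec.containers
}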
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - } - ], - "references": [ - "\n\n \n\n " - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." 
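The allow-list helpers above normalize the image reference before matching: an image with no registry host (no "/") is assumed to come from Docker Hub and is prefixed with docker.io/, and each imageRepositoryAllowList entry is turned into an anchored prefix regex. A simplified sketch of that behavior (illustrative names; the real helper also special-cases entries that already end with "/"):

package example

# Images such as "nginx:1.25" carry no registry host, so they are treated
# as "docker.io/nginx:1.25" before the allow-list comparison.
full_reference(image) = ref {
	not contains(image, "/")
	ref := sprintf("docker.io/%s", [image])
}

full_reference(image) = ref {
	contains(image, "/")
	ref := image
}

# An allow-list entry such as "quay.io" becomes the anchored pattern
# "^quay.io/.*$", so only the registry prefix of the reference is compared.
in_allowed_registry(image, registry) {
	regex.match(sprintf("^%s/.*$", [registry]), full_reference(image))
}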
- }, - { - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access", - "Privilege escalation" - ] - } - ] - }, - "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rbac-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" - }, - { - "name": "rbac-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" - } - ], - "references": [ - "\n\n " - ] - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
Verify that the ownership is set to `root:root`", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": 
obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... \"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": {\"webhook\": { 
\"enabled\": is set to true`.\n\n If the `\"authentication\": {\"mode\": {` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `\"authentication\": {\"mode\": {` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. 
Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
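To illustrate the config-file branch of the `kubelet-authorization-mode-alwaysAllow` rule above (base64-decode the captured file, `yaml.unmarshal` it, then compare `authorization.mode`), here is a hypothetical test sketch. The helper name `always_allow_config`, the config path on the command line and the node metadata are invented, and only that rule file is assumed to be loaded next to the test.

```rego
package armo_builtins

# Illustrative kubelet config that explicitly selects AlwaysAllow authorization.
always_allow_config := base64.encode("authorization:\n  mode: AlwaysAllow\n")

# No --authorization-mode flag, but the --config file decodes to AlwaysAllow,
# so the config-file deny rule above is expected to fire.
test_config_file_always_allow_fails {
    count(deny) > 0 with input as [{
        "apiVersion": "hostdata.kubescape.cloud/v1beta0",
        "kind": "KubeletInfo",
        "metadata": {"name": "worker-1"},
        "data": {
            "cmdLine": "kubelet --config /etc/kubernetes/kubelet/kubelet-config.yaml",
            "configFile": {"content": always_allow_config}
        }
    }]
}
```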
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. 
x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\nreadOnlyPort to 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
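The command-line branch of the `read-only-port-enabled-updated` rule that follows only accepts `--read-only-port=0`; any other value is reported. A hypothetical pair of tests, with a sample port and node name invented and the rule file assumed to be loaded alongside:

```rego
package armo_builtins

# A read-only port left open on the command line should be reported ...
test_read_only_port_open_fails {
    count(deny) > 0 with input as [{
        "apiVersion": "hostdata.kubescape.cloud/v1beta0",
        "kind": "KubeletInfo",
        "metadata": {"name": "worker-1"},
        "data": {"cmdLine": "kubelet --read-only-port=10255"}
    }]
}

# ... while explicitly disabling it with 0 should not be.
test_read_only_port_disabled_passes {
    count(deny) == 0 with input as [{
        "apiVersion": "hostdata.kubescape.cloud/v1beta0",
        "kind": "KubeletInfo",
        "metadata": {"name": "worker-1"},
        "data": {"cmdLine": "kubelet --read-only-port=0"}
    }]
}
```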
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "read-only-port-enabled-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are 
protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": 
{\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of 
`\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not 
set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above 
command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-ip-tables", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to 
true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. 
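One subtlety of the config-file branch in the `kubelet-ip-tables` rule above: `not yamlConfig.makeIPTablesUtilChains == true` also succeeds when the key is absent, so a config file that simply omits the setting is reported as well. A hypothetical sketch demonstrating that behaviour, with the sample config content and node name invented and the rule file assumed to be loaded next to the test:

```rego
package armo_builtins

# Illustrative kubelet config that never mentions makeIPTablesUtilChains.
config_without_iptables_key := base64.encode("readOnlyPort: 0\n")

# The rule treats a missing key the same as an explicit false, so this is reported.
test_missing_make_iptables_key_fails {
    count(deny) > 0 with input as [{
        "apiVersion": "hostdata.kubescape.cloud/v1beta0",
        "kind": "KubeletInfo",
        "metadata": {"name": "worker-1"},
        "data": {
            "cmdLine": "kubelet --config /etc/kubernetes/kubelet/kubelet-config.yaml",
            "configFile": {"content": config_without_iptables_key}
        }
    }]
}
```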
The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-hostname-override", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. 
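Since the `kubelet-hostname-override` rule above inspects only the command line (the audit text notes this setting has no config-file equivalent), a single case exercises it. A hypothetical sketch with an invented override value and node name, assuming the rule file is loaded alongside:

```rego
package armo_builtins

# Any occurrence of --hostname-override on the kubelet command line is reported.
test_hostname_override_present_fails {
    count(deny) > 0 with input as [{
        "apiVersion": "hostdata.kubescape.cloud/v1beta0",
        "kind": "KubeletInfo",
        "metadata": {"name": "worker-1"},
        "data": {"cmdLine": "kubelet --hostname-override=10.0.0.12 --config /etc/kubernetes/kubelet/kubelet-config.yaml"}
    }]
}
```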
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
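To make the `eventRecordQPS` guidance concrete against the `kubelet-event-qps` rule that follows, here is a hypothetical test pair: a config that sets the value to `0` (unlimited, and therefore reported by the rule) and one that uses the suggested value of `5`. Helper names, config content and node metadata are invented, and only that rule file is assumed to be loaded with the tests.

```rego
package armo_builtins

# Illustrative configs: an unlimited event rate (0) and the suggested value of 5.
unlimited_events_config := base64.encode("eventRecordQPS: 0\n")
limited_events_config := base64.encode("eventRecordQPS: 5\n")

# eventRecordQPS set to 0 in the config file is reported by the rule.
test_event_record_qps_zero_fails {
    count(deny) > 0 with input as [{
        "apiVersion": "hostdata.kubescape.cloud/v1beta0",
        "kind": "KubeletInfo",
        "metadata": {"name": "worker-1"},
        "data": {
            "cmdLine": "kubelet --config /etc/kubernetes/kubelet/kubelet-config.yaml",
            "configFile": {"content": unlimited_events_config}
        }
    }]
}

# A non-zero value produces no violation from this rule.
test_event_record_qps_limited_passes {
    count(deny) == 0 with input as [{
        "apiVersion": "hostdata.kubescape.cloud/v1beta0",
        "kind": "KubeletInfo",
        "metadata": {"name": "worker-1"},
        "data": {
            "cmdLine": "kubelet --config /etc/kubernetes/kubelet/kubelet-config.yaml",
            "configFile": {"content": limited_events_config}
        }
    }]
}
```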
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-event-qps", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host 
sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0182", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. 
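For reference, a hedged sketch of the equivalent kubelet config entry (the upstream KubeletConfiguration field is `rotateCertificates`; shown in YAML, any other values are assumptions):

```
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Enables automatic rotation of the kubelet client certificate as it nears expiry.
rotateCertificates: true
```
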
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", - "references": [ - "\n\n \n\n \n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-certificates", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --rotate-certificates argument is not set to false.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running 
kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" - } - ] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. 
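Where such broad access is not required, one lower-privileged alternative is to bind subjects to the built-in `edit` or `view` ClusterRole inside a single namespace, sketched below (the binding, namespace and group names are hypothetical):

```
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: dev-team-edit        # hypothetical binding name
  namespace: dev             # hypothetical namespace
subjects:
- kind: Group
  name: dev-team             # hypothetical group
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: edit                 # built-in role, far narrower than cluster-admin
  apiGroup: rbac.authorization.k8s.io
```
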
When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "cluster-admin-role", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, 
verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get 
secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - 
"name": "CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-create-pod", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := 
[sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "automount-default-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - 
"description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "namespace-without-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following 
namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = 
[paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "controlID": "C-0201", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. 
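On the workload side, a minimal sketch of a container that drops all Linux capabilities (the pod name and image are hypothetical):

```
apiVersion: v1
kind: Pod
metadata:
  name: app                            # hypothetical pod
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0   # hypothetical image
    securityContext:
      capabilities:
        drop: ["ALL"]                  # add back individual capabilities only where required
```
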
Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
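A sketch of enabling the restricted Pod Security Standard on a namespace via the label this rule checks for (the namespace name is hypothetical):

```
apiVersion: v1
kind: Namespace
metadata:
  name: payments             # hypothetical namespace
  labels:
    pod-security.kubernetes.io/enforce: restricted
```
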
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0205", - "name": "CIS-4.4.1 Ensure latest CNI version is used", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Ensure CNI plugin supports network policies.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - } - ] - }, - { - "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
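A common starting point is a per-namespace default-deny ingress policy, sketched below (the namespace name is hypothetical):

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: dev             # hypothetical namespace
spec:
  podSelector: {}            # selects every pod in the namespace
  policyTypes:
  - Ingress                  # no ingress rules defined, so all ingress traffic is denied
```
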
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "default_value": "By default, network policies are not created.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
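As a rough illustration of how the `internal-networking` rule above evaluates its input, the following `opa test` sketch feeds it a hypothetical namespace with and without a matching NetworkPolicy (object names are invented; it assumes only this rule file is loaded next to the test):

```
package armo_builtins

# Hypothetical tests for the internal-networking rule above.

# A namespace with no NetworkPolicy defined in it should produce an alert.
test_namespace_without_policy_is_flagged {
	ns := {"kind": "Namespace", "metadata": {"name": "payments"}}
	count(deny) > 0 with input as [ns]
}

# A namespace that has at least one NetworkPolicy defined in it should pass,
# assuming no other deny rules from the package are loaded in this test run.
test_namespace_with_policy_is_not_flagged {
	ns := {"kind": "Namespace", "metadata": {"name": "payments"}}
	np := {"kind": "NetworkPolicy", "metadata": {"name": "default-deny", "namespace": "payments"}}
	count(deny) == 0 with input as [ns, np]
}
```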
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-secrets-in-env-var", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": 
[],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "CIS-4.5.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "external-secret-storage", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" - } - ] - }, - { - "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "\n\n \n\n \n\n ." 
- ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "list-all-namespaces", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. 
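Since this control bundles several security-context rules (listed below), here is a minimal `opa test` sketch of how the first of them, `rule-privilege-escalation`, reacts to a container that sets `privileged: true`. The Pod manifest is invented for illustration, and the sketch assumes that rule file is loaded alongside the test:

```
package armo_builtins

# Hypothetical test for the rule-privilege-escalation rule listed below.
test_privileged_container_is_flagged {
	pod := {
		"kind": "Pod",
		"metadata": {"name": "demo"},
		"spec": {"containers": [{
			"name": "app",
			"securityContext": {"privileged": true}
		}]}
	}
	count(deny) > 0 with input as [pod]
}
```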
For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := 
input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - 
"ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - }, - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, 
\"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - 
"apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", 
path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, 
path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first 
if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" - }, - { - "name": "set-fsgroup-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-systctls-params", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.systctls is not set.", - "remediation": "Set securityContext.systctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not 
set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB 
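For reference, the Kubernetes pod field is spelled `spec.securityContext.sysctls` (a list of name/value pairs), and the SYN-cookie sysctl is `net.ipv4.tcp_syncookies`. The sketch below follows the Kubernetes API spelling and simply checks that at least one sysctl is declared; package, function, and input shape are illustrative only.

```
package example.sysctls

import future.keywords.if

# Each entry is a {name, value} pair,
# e.g. {"name": "net.ipv4.tcp_syncookies", "value": "1"}.
sysctls_declared(ctx) := true if {
	count(ctx.sysctls) > 0
} else := false

test_sysctls_present if {
	sysctls_declared({"sysctls": [{"name": "net.ipv4.tcp_syncookies", "value": "1"}]})
}

test_sysctls_absent if {
	not sysctls_declared({})
}
```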
###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "CIS-4.7.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": 
\"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-rbac-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-core1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap", - "Endpoints", - "LimitRange", - "PersistentVolumeClaim", - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-core2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController", - "ResourceQuota", - "ServiceAccount", - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package 
armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-other1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ControllerRevision" - ] - }, - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - }, - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - }, - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-other2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress", - "NetworkPolicy" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - }, - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := 
is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-event-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "events.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = 
[failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n as an alternative AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "psp-deny-privileged-container", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A 
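The PSP rules in this and the following controls all use the same aggregation idiom: `every psp in input { ... }` first proves that no compliant PodSecurityPolicy exists at all, and only then is `psp` re-bound to report each offending object. The stripped-down sketch below mirrors that idiom under the assumption (as in the rules above) that the input contains only PodSecurityPolicy objects; it is a sketch, not the library's rule.

```
package example.psp_pattern

import future.keywords.every
import future.keywords.if
import future.keywords.in

# Deny only when *every* policy in the input has the flag enabled; the second
# binding then emits one violation per offending policy.
deny[msg] {
	every p in input {
		p.kind == "PodSecurityPolicy"
		p.spec.privileged == true
	}

	p := input[_]
	p.kind == "PodSecurityPolicy"
	p.spec.privileged == true

	msg := sprintf("PodSecurityPolicy '%v' permits privileged containers", [p.metadata.name])
}

test_all_privileged if {
	count(deny) > 0 with input as [
		{"kind": "PodSecurityPolicy", "metadata": {"name": "a"}, "spec": {"privileged": true}},
		{"kind": "PodSecurityPolicy", "metadata": {"name": "b"}, "spec": {"privileged": true}}
	]
}

test_one_restrictive if {
	count(deny) == 0 with input as [
		{"kind": "PodSecurityPolicy", "metadata": {"name": "a"}, "spec": {"privileged": true}},
		{"kind": "PodSecurityPolicy", "metadata": {"name": "b"}, "spec": {"privileged": false}}
	]
}
```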
container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostpid", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether 
privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostipc", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostnetwork", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - 
"description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowprivilegeescalation", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath 
:= \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-root-container", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" - } - ] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of 
containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowed-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. 
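The kubelet file-permission rules in these controls compare against 420, which is simply 0o644 written in decimal (Rego integer literals have no octal form). The library delegates the comparison to its own cautils.unix_permissions_allow helper, whose implementation is not shown here, so the function below is an assumed sketch of one way such a check can be expressed, not that helper's source.

```
package example.permissions

import future.keywords.if

# 0o644 == 420 decimal. A mode is acceptable when it grants no bit outside
# the allowed mask, i.e. the actual bits are a subset of the allowed bits.
permissions_allow(allowed, actual) if {
	bits.and(actual, allowed) == actual
}

test_0600_within_0644 if {
	permissions_allow(420, 384)  # 0o600 is within 0o644
}

test_0666_exceeds_0644 if {
	not permissions_allow(420, 438)  # 0o666 adds group/other write
}
```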
The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. 
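The host-sensor rules above trim the reported object with json.filter, which keeps only the listed "/"-separated paths before attaching it to the alert. A tiny illustration of that built-in follows; the data values are made up for the test.

```
package example.jsonfilter

import future.keywords.if

# json.filter keeps only the listed paths and drops everything else.
test_filter_keeps_selected_paths if {
	obj := {"kind": "KubeletInfo", "data": {"kubeConfigFile": {"path": "/var/lib/kubelet/kubeconfig", "permissions": 420}, "other": "dropped"}}
	filtered := json.filter(obj, ["kind", "data/kubeConfigFile"])
	filtered == {"kind": "KubeletInfo", "data": {"kubeConfigFile": {"path": "/var/lib/kubelet/kubeconfig", "permissions": 420}}}
}
```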
actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0239", - "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-default-service-accounts-has-only-default-roles", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. \ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0240", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", - "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Network Policy requires the Network Policy add-on. 
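The dedicated-service-account rule above flags any RoleBinding or ClusterRoleBinding whose subjects include the `default` ServiceAccount, unless the binding carries the bootstrap RBAC label. A self-contained sketch of that predicate with an assumed input shape is shown below.

```
package example.default_sa

import future.keywords.if
import future.keywords.in

# Flags a binding when one of its subjects is the 'default' ServiceAccount
# and the binding is not one of the bootstrap RBAC defaults.
binds_default_sa(binding) if {
	some subject in binding.subjects
	subject.kind == "ServiceAccount"
	subject.name == "default"
	not binding.metadata.labels["kubernetes.io/bootstrapping"] == "rbac-defaults"
}

test_default_sa_flagged if {
	binds_default_sa({
		"kind": "RoleBinding",
		"metadata": {"name": "demo"},
		"subjects": [{"kind": "ServiceAccount", "name": "default", "namespace": "default"}]
	})
}
```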
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-cni-enabled-aks", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" - } - ] - }, - { - "controlID": "C-0241", - "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", - "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", - "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. 
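The rule-cni-enabled-aks rule above accepts three plugin/policy combinations from the ClusterDescribe payload. The sketch below mirrors that accepted set as data so the combinations are visible at a glance; the property names follow the payload used in the rule, and the package and function names are illustrative.

```
package example.aks_network_policy

import future.keywords.if
import future.keywords.in

# Accepted (networkPlugin, networkPolicy) pairs, mirrored from the rule above.
allowed_combinations := {["azure", "azure"], ["azure", "calico"], ["kubenet", "calico"]}

network_policy_enabled(properties) if {
	[properties.networkProfile.networkPlugin, properties.networkProfile.networkPolicy] in allowed_combinations
}

test_calico_on_kubenet if {
	network_policy_enabled({"networkProfile": {"networkPlugin": "kubenet", "networkPolicy": "calico"}})
}

test_policy_missing if {
	not network_policy_enabled({"networkProfile": {"networkPlugin": "kubenet"}})
}
```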
Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", - "remediation": "Set Azure RBAC as access system.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-azure-rbac-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", - "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" - } - ] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. 
For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-hostile-multitenant-workloads", - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "ruleDependencies": [], - "configInputs": [], - "controlConfigInputs": [], - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", - "remediation": "Use physically isolated clusters", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" - } - ] - }, - { - "controlID": "C-0243", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. 
Or, a network issue might prevent you from connecting to the registry.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "remediation": "Enable Azure Defender image scanning. Command: az aks update --enable-defender --resource-group --name ", - "ruleQuery": "armo_builtin", - "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" - } - ] - }, - { - "controlID": "C-0244", - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", - "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. 
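A comparable sketch for `isAzureImageScanningEnabled` in the Defender image-scanning rule above; the `securityProfile.defender.securityMonitoring` path is the one the rule dereferences, and the inputs are assumed shapes:

```
package armo_builtins

# Defender security monitoring enabled: the helper holds and no alert is produced.
test_defender_image_scanning_enabled {
	isAzureImageScanningEnabled({"securityProfile": {"defender": {"securityMonitoring": {"enabled": true}}}})
}

# With no securityProfile at all the helper is undefined, so the deny rule fires.
test_defender_image_scanning_missing {
	not isAzureImageScanningEnabled({})
}
```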
Additionally, organizations have various options to closely manage encryption or encryption keys.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - } - ] - }, - { - "controlID": "C-0245", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := 
\"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" - } - ] - }, - { - "controlID": "C-0247", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. 
Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", - "default_value": "By default, Endpoint Private Access is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "restrict-access-to-the-control-plane-endpoint", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" - } - ] - }, - { - "controlID": "C-0248", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
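And for `isAuthorizedIPRangesSet` in `restrict-access-to-the-control-plane-endpoint`, a sketch using an assumed `apiServerAccessProfile` shape and a placeholder CIDR:

```
package armo_builtins

# A non-empty authorizedIPRanges list passes the check.
test_authorized_ip_ranges_set {
	isAuthorizedIPRangesSet({"properties": {"apiServerAccessProfile": {"authorizedIPRanges": ["203.0.113.0/24"]}}})
}

# An empty list is treated the same as an unset parameter.
test_authorized_ip_ranges_empty {
	not isAuthorizedIPRangesSet({"properties": {"apiServerAccessProfile": {"authorizedIPRanges": []}}})
}
```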
Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-clusters-are-created-with-private-nodes", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" - } - ] - }, - { - "controlID": "C-0249", - "name": "CIS-5.6.1 Restrict untrusted workloads", - "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. 
Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", - "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review" - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "ACI is not a default component of the AKS", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-manual", - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - } - ] - }, - { - "controlID": "C-0250", - "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", - "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. 
Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-service-principle-has-read-only-permissions", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0251", - "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", - "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. 
For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-role-definitions-in-acr", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0252", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" - } - ] - }, - { - "controlID": "C-0254", - "name": "CIS-2.1.1 Enable audit Logs", - "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", - "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", - "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. 
On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", - "default_value": "By default, cluster control plane logs aren't sent to be Logged.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-manual", - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - } - ] - } - ], - "ControlsIDs": [ - "C-0078", - "C-0088", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0182", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0201", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - 
"C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0235", - "C-0238", - "C-0239", - "C-0240", - "C-0241", - "C-0242", - "C-0243", - "C-0244", - "C-0245", - "C-0247", - "C-0248", - "C-0249", - "C-0250", - "C-0251", - "C-0252", - "C-0254" - ] -} \ No newline at end of file diff --git a/releaseDev/cis-eks-t1.2.0.json b/releaseDev/cis-eks-t1.2.0.json deleted file mode 100644 index bb150ea09..000000000 --- a/releaseDev/cis-eks-t1.2.0.json +++ /dev/null @@ -1,4299 +0,0 @@ -{ - "name": "cis-eks-t1.2.0", - "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", - "attributes": { - "version": "v1.2.0", - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Control Plane Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0067" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183" - ] - }, - "3": { - "name": "Container Optimized OS", - "id": "3.3", - "controlsIDs": [ - "C-0226" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191" - ] - }, - "2": { - "name": "Pod Security Policies", - "id": "4.2", - "controlsIDs": [ - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220" - ] - }, - "3": { - "name": "CNI Plugin", - "id": "4.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "4.4", - "controlsIDs": [ - "C-0207", - "C-0234" - ] - }, - "6": { - "name": "General Policies", - "id": "4.6", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0221", - "C-0223", - "C-0078" - ] - }, - "2": { - "name": "Identity and Access Management (IAM)", - "id": "5.2", - "controlsIDs": [ - "C-0225" - ] - }, - "3": { - "name": "AWS EKS Key Management Service", - "id": "5.3", - "controlsIDs": [ - "C-0066" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0232" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0233" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS 
cluster creation.", - "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", - "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. This protects against attackers in the event that they manage to gain access to etcd.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = 
cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ], - "manual_test": "Using the etcdctl commandline, read that secret out of etcd:\n\n \n```\nETCDCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] 
must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", - "references": [ - "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" - ], - "impact_statement": "", - "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." - }, - { - "name": "CIS-2.1.1 Enable audit Logs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", - "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", - "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. 
Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.1.4 Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Use approved container registries.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - } - ], - "references": [ - "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", - "default_value": "" - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - 
"long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. 
Verify that the ownership is set to `root:root`", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, 
\"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot 
obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "read-only-port-enabled-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting 
idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/pull/18552" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": 
obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 
2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", 
\"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n 
First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"makeIPTablesUtilChains.:true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-ip-tables", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property 
makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. 
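The kubelet checks above (C-0177 protect-kernel-defaults and C-0178 make-iptables-util-chains) share one pattern: when the flag is absent from `cmdLine` but `--config` is present, the rule base64-decodes the captured config file, parses it as YAML, and tests the equivalent property. A minimal standalone sketch of that branch, assuming a KubeletInfo-shaped input; the package and message names are illustrative, not part of the framework:

```rego
package example

# Sketch of the config-file branch used by the kubelet rules above.
# Assumed input shape (host-sensor KubeletInfo object):
#   {"data": {"cmdLine": "...", "configFile": {"content": "<base64 YAML>"}}}
deny[msg] {
	cmd := input.data.cmdLine

	# flag not passed on the command line, but a config file is in use
	not contains(cmd, "--make-iptables-util-chains")
	contains(cmd, "--config")

	# decode and parse the kubelet config captured by the host sensor
	config := yaml.unmarshal(base64.decode(input.data.configFile.content))

	# fail unless the property is explicitly true
	not config.makeIPTablesUtilChains == true

	msg := "makeIPTablesUtilChains is not set to true"
}
```

Evaluated with `opa eval -i kubelet-info.json -d sketch.rego 'data.example.deny'`, this branch reports a violation only when the flag is absent from the command line and the decoded config does not enable the setting; the flag-present case is handled by a separate branch in the real rules.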
The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/issues/22063", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-hostname-override", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. 
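The hostname-override rule above builds its alert object with `json.filter`, which keeps only the listed paths of the KubeletInfo object so that the rest of the captured data is not echoed back in the alert. A small illustration with made-up values (the object below is an example, not real scan output):

```rego
package example

# json.filter keeps only the listed "/"-separated paths and drops the rest.
kubelet_info := {
	"apiVersion": "hostdata.kubescape.cloud/v1beta0",
	"kind": "KubeletInfo",
	"metadata": {"name": "node-1"},
	"data": {
		"cmdLine": "kubelet --hostname-override=ip-10-0-0-1",
		"configFile": {"content": "<base64 blob>"}
	}
}

# external_obj keeps apiVersion, kind, metadata and data.cmdLine;
# data.configFile is dropped from the alert payload.
external_obj := json.filter(kubelet_info, ["apiVersion", "data/cmdLine", "kind", "metadata"])
```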
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
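Audit Method 2 reads the live configuration from the kubelet's `configz` proxy endpoint rather than from the node's file system. A hedged sketch of checking that response in Rego; it assumes the response nests the fields under `kubeletconfig` and, like the rule for this control, flags the unlimited setting of `0`:

```rego
package example

# Sketch for Audit Method 2: evaluate the /configz response directly.
# Assumed response shape: {"kubeletconfig": {"eventRecordQPS": <int>, ...}}
deny[msg] {
	input.kubeletconfig.eventRecordQPS == 0
	msg := "eventRecordQPS is 0 (unlimited), which risks event flooding on the kubelet"
}
```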
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. 
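Several of the kubelet rules in this framework, including the one that follows, carry a separate branch for the case where the host sensor could not read the config file at all: `--config` is on the command line but no content was captured. Rather than silently passing, they emit a distinct "failed to analyze" alert. A minimal sketch of that guard (names are illustrative):

```rego
package example

# Guard used when the host sensor could not capture the kubelet config file:
# --config is passed, but no file content is available to analyze.
deny[msg] {
	contains(input.data.cmdLine, "--config")
	not input.data.configFile.content
	msg := "Failed to analyze config file"
}
```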
The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-event-qps", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0181", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/41912", - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", - "https://kubernetes.io/docs/imported/release/notes/", - "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "validate-kubelet-tls-configuration-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 
0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t#get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its 
client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to implement rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
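The rule for this control (further down) looks for `RotateKubeletServerCertificate=true` inside a `--feature-gates` argument by splitting the command line on whitespace. A standalone sketch of that detection, assuming the same KubeletInfo input shape; the helper and package names are illustrative:

```rego
package example

# True when RotateKubeletServerCertificate=true appears in a --feature-gates
# argument on the kubelet command line.
feature_gate_enabled(cmd) {
	contains(cmd, "--feature-gates=")
	args := regex.split(" +", cmd)
	contains(args[_], "RotateKubeletServerCertificate=true")
}

deny[msg] {
	not feature_gate_enabled(input.data.cmdLine)
	msg := "RotateKubeletServerCertificate is not enabled via --feature-gates"
}
```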
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/45059", - "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
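The rule below first decides whether the check is needed at all: it is skipped when server certificate rotation is already requested on the command line, or when the captured config file sets `serverTLSBootstrap: true`. Pulled out on its own, the skip helper looks like this (a sketch restating the rule's own logic):

```rego
package example

# Skip when server certificate rotation is requested on the command line.
should_skip_check(kubelet_info) {
	contains(kubelet_info.data.cmdLine, "--rotate-server-certificates")
}

# Skip when the captured kubelet config file sets serverTLSBootstrap: true.
should_skip_check(kubelet_info) {
	config := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))
	config.serverTLSBootstrap == true
}
```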
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" - } - ] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
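The manual test for this control lists every ClusterRoleBinding together with its role and subjects. The same idea can be expressed as a minimal Rego check over plain RBAC objects; this sketch assumes the input is a JSON array of ClusterRoleBinding objects, which is a simpler shape than the aggregated subject vectors the framework's own rule consumes:

```rego
package example

# Report every subject bound to the cluster-admin ClusterRole.
# Assumed input: a JSON array of ClusterRoleBinding objects.
deny[msg] {
	binding := input[_]
	binding.kind == "ClusterRoleBinding"
	binding.roleRef.name == "cluster-admin"
	subject := binding.subjects[_]
	msg := sprintf("%s %s is bound to cluster-admin", [subject.kind, subject.name])
}
```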
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "cluster-admin-role", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := 
subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
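For the wildcard control introduced here, the same idea can be sketched directly over Role and ClusterRole objects: flag any rule whose verbs, resources, or apiGroups contain the wildcard `*`. This assumes the input is a plain JSON array of role objects; the framework's own rule, which follows, works on aggregated subject vectors instead:

```rego
package example

import future.keywords.in

# Flag any Role/ClusterRole rule that uses the wildcard "*".
# Assumed input: a JSON array of Role and ClusterRole objects.
deny[msg] {
	role := input[_]
	role.kind in {"Role", "ClusterRole"}
	rule := role.rules[_]
	some field in {"verbs", "resources", "apiGroups"}
	"*" in rule[field]
	msg := sprintf("%s %s uses a wildcard in %s", [role.kind, role.metadata.name, field])
}
```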
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across all namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": 
"CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-create-pod", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", 
[rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "automount-default-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - 
"match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "namespace-without-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any 
service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := 
is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": 
sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. 
The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", - "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", - "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-bind-escalate", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can bind or escalate roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 
0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "controlID": "C-0205", - "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", - "references": [ - "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", - "https://aws.github.io/aws-eks-best-practices/network/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - } - ] - }, - { - "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", - "https://octetz.com/posts/k8s-network-policy-apis", - "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "default_value": "By default, network policies are not created.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-secrets-in-env-var", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", 
[wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "list-all-namespaces", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. 
It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields are set according to the recommendations in the CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments are defined as privileged", - "remediation": "avoid defining pods as privileged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the 
following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := 
is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - }, - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", 
\"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure 
you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - 
"apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, 
path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" - }, - { - "name": "set-fsgroup-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-systctls-params", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.systctls is not set.", - "remediation": "Set securityContext.systctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - 
"v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "CIS-4.6.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-rbac-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-core1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap", - "Endpoints", - "LimitRange", - "PersistentVolumeClaim", - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-core2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController", - "ResourceQuota", - "ServiceAccount", - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": 
\"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-other1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ControllerRevision" - ] - }, - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - }, - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - }, - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-other2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress", - "NetworkPolicy" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - }, - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] 
{\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-event-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "events.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "psp-deny-privileged-container", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the 
admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostpid", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create 
a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostipc", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not 
defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-hostnetwork", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. 
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowprivilegeescalation", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-root-container", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return all the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" - } - ] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined. If a PSP is created 'allowedCapabilities' is set by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-deny-allowed-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0220", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-required-drop-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return all the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" - } - ] - }, - { - "controlID": "C-0221", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", - "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "To utilize AWS ECR for Image scanning, please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.\n\n 2. Open the Amazon ECR console.\n3. From the navigation bar, choose the Region to create your repository in.\n4. 
In the navigation pane, choose Repositories.\n5. On the Repositories page, choose the repository that contains the image to scan.\n6. On the Images page, select the image to scan and then choose Scan.", - "manual_test": "Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "If you are utilizing AWS ECR The following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageErrorYou may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returnedYou may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure-image-scanning-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "DescribeRepositories" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" - } - ] - }, - { - "controlID": "C-0222", - "name": "CIS-5.1.2 Minimize user access to Amazon ECR", - "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with 
vulnerable containers.", - "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-aws-policies-are-present", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "fails if aws policies are not found", - "remediation": "Implement policies to minimize user access to Amazon ECR", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0223", - "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", - "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring 
pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", - "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", - "attributes": { - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.2.5" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\t#node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none 
read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" - } - ] - }, - { - "controlID": "C-0225", - "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Auditability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally, ensure that the automountServiceAccountToken: false setting is in place for each default service account.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", - "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-default-service-accounts-has-only-default-roles", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "automount-default-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[service_account]\n\t\t}\n\t}\n} \n" - } - ] - }, - { - "controlID": "C-0226", - "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", - "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", - "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like locked-down firewall is configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", - "remediation": "", - "manual_test": "If a container-optimized OS is required examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", - "references": [ - "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", - "https://aws.amazon.com/bottlerocket/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. ssh) may not be possible, with access and debugging being intended via a management tool.", - "default_value": "A container-optimized OS is not the default.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "alert-container-optimized-os-not-in-use", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. 
\n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" - } - ] - }, - { - "controlID": "C-0227", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. 
If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", - "default_value": "By default, Endpoint Public Access is disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-endpointprivateaccess-is-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "controlID": "C-0228", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. 
The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "Check for private endpoint access to the Kubernetes API server", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", - "default_value": "By default, the Public Endpoint is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" - } - ] - }, - { - "controlID": "C-0229", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "controlID": "C-0230", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", - "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", - "remediation": "", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Network Policy requires the Network Policy add-on. 
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-network-policy-is-enabled-eks", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0231", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [ - "EKS" - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := \"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := 
\"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := \tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" - } - ] - }, - { - "controlID": "C-0232", - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", - "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", - "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", - "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", - "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
If they are not they will not be able to access the namespace or deploy.", - "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "review-roles-with-aws-iam-authenticator", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0233", - "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", - "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", - "long_description": "", - "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. 
On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "", - "default_value": "By default, AWS Fargate is not utilized.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "alert-fargate-not-in-use", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if its name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" - } - ] - }, - { - "controlID": "C-0234", - "name": "CIS-4.4.2 Consider external secret storage", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "manual_test": "Review your secrets management implementation.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "ensure-external-secrets-storage-is-in-use", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" - } - ] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the 
`--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [ - { - "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. 
configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [ - { - "name": "rule-hostile-multitenant-workloads", - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "ruleDependencies": [], - "configInputs": [], - "controlConfigInputs": [], - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", - "remediation": "Use physically isolated clusters", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" - } - ] - }, - { - "controlID": "C-0246", - "name": "CIS-4.1.7 Avoid use of system:masters group", - "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", - "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. 
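For the `system:masters` control introduced here, the part of the review that can be automated is scanning RBAC bindings for explicit references to the group; membership granted through client certificates never appears as an API object, which is why the control still ends in a manual check. A hedged sketch of that partial, binding-level check follows; the `example_binding` object is hypothetical.

```
package system_masters_sketch

# Hypothetical ClusterRoleBinding, shaped like the Kubernetes RBAC object.
example_binding := {
	"kind": "ClusterRoleBinding",
	"metadata": {"name": "break-glass"},
	"subjects": [{"kind": "Group", "apiGroup": "rbac.authorization.k8s.io", "name": "system:masters"}]
}

# Flag bindings whose subjects explicitly reference the system:masters group.
deny[msg] {
	subject := example_binding.subjects[_]
	subject.kind == "Group"
	subject.name == "system:masters"
	msg := sprintf("%s %s grants permissions to system:masters", [example_binding.kind, example_binding.metadata.name])
}
```

Evaluating `data.system_masters_sketch.deny` against this input yields one finding for the `break-glass` binding; certificate-based members of the group would still require the manual review described in the control.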
An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", - "remediation": "Remove the `system:masters` group from all users in the cluster.", - "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", - "references": [ - "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", - "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rule-manual", - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - } - ] - } - ], - "ControlsIDs": [ - "C-0066", - "C-0067", - "C-0078", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0205", - "C-0206", - "C-0207", - "C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220", - "C-0221", - "C-0222", - "C-0223", - "C-0225", - "C-0226", - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231", - "C-0232", - "C-0233", - "C-0234", - "C-0235", - "C-0238", - "C-0242", - "C-0246" - ] -} \ No newline at end of file diff --git a/releaseDev/cis-v1.23-t1.0.1.json b/releaseDev/cis-v1.23-t1.0.1.json deleted file mode 100644 index b7efec379..000000000 --- a/releaseDev/cis-v1.23-t1.0.1.json +++ /dev/null @@ -1,7883 +0,0 @@ -{ - "name": "cis-v1.23-t1.0.1", - "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", - "attributes": { - "version": "v1.0.1", - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "1": { - "id": "1", - "name": "Control Plane Components", - "subSections": { - "1": { - "id": "1.1", - "name": "Control Plane Node Configuration Files", - "controlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - "C-0101", - "C-0102", - 
"C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112" - ] - }, - "2": { - "id": "1.2", - "name": "API Server", - "controlsIDs": [ - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143" - ] - }, - "3": { - "id": "1.3", - "name": "Controller Manager", - "controlsIDs": [ - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - "C-0149", - "C-0150" - ] - }, - "4": { - "id": "1.4", - "name": "Scheduler", - "controlsIDs": [ - "C-0151", - "C-0152" - ] - } - } - }, - "2": { - "name": "etcd", - "id": "2", - "controlsIDs": [ - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159" - ] - }, - "3": { - "name": "Control Plane Configuration", - "id": "3", - "subSections": { - "2": { - "name": "Logging", - "id": "3.2", - "controlsIDs": [ - "C-0160", - "C-0161" - ] - } - } - }, - "4": { - "name": "Worker Nodes", - "id": "4", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "4.1", - "controlsIDs": [ - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171" - ] - }, - "2": { - "name": "Kubelet", - "id": "4.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184" - ] - } - } - }, - "5": { - "name": "Policies", - "id": "5", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "5.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "5.2", - "controlsIDs": [ - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204" - ] - }, - "3": { - "name": "Network Policies and CNI", - "id": "5.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "5.4", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "7": { - "name": "General Policies", - "id": "5.7", - "controlsIDs": [ - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "controlID": "C-0092", - "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0093", - "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
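All of the file-permission rules in this patch follow the pattern just shown: read `file.permissions` from the host-sensor object and compare it against a decimal constant such as `384 # == 0o600`. Rego has no octal literals, so the decimal values can look arbitrary; the sketch below spells out the mapping for the three modes that appear in these rules and shows one plausible way a "mode or more restrictive" test can be written. The real comparison is delegated to kubescape's `cautils.unix_permissions_allow`, whose implementation is not part of this patch, so `perms_within` is only an illustrative stand-in.

```
package perms_sketch

# The host sensor reports permissions as a decimal integer, so the rules
# compare against the decimal form of the octal mode:
#   0o600 -> 384, 0o644 -> 420, 0o700 -> 448
allowed_600 := 384
allowed_644 := 420
allowed_700 := 448

# Illustrative "allowed mode or more restrictive" test over the low nine
# permission bits: the actual mode must not set any bit the allowed mode
# does not set. (Stand-in only, not cautils.unix_permissions_allow itself.)
perms_within(allowed, actual) {
	forbidden := 511 - allowed  # 511 == 0o777
	bits.and(actual, forbidden) == 0
}

# Example: 0o640 (decimal 416) passes a 0o644 check but fails a 0o600 check.
example_ok { perms_within(allowed_644, 416) }
example_too_permissive { not perms_within(allowed_600, 416) }
```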
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0094", - "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
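Both the permission and the ownership rules read host-sensor objects (`ControlPlaneInfo`, `KubeletInfo`, `CNIInfo`) instead of ordinary cluster resources. The payload schema is not spelled out anywhere in this patch, but the paths the rules walk imply a shape like the hypothetical object below; the sketch also shows how `object.get` pulls out the file record with a `false` fallback and how `json.filter` trims the object down to the fields that end up in the alert.

```
package hostsensor_sketch

# Hypothetical ControlPlaneInfo object, shaped the way the rules above read
# it; the real host-sensor payload may carry additional fields.
example := {
	"apiVersion": "hostdata.kubescape.cloud/v1beta0",
	"kind": "ControlPlaneInfo",
	"metadata": {"name": "control-plane-node"},
	"data": {"APIServerInfo": {"specsFile": {
		"path": "/etc/kubernetes/manifests/kube-apiserver.yaml",
		"permissions": 420,
		"ownership": {"username": "root", "groupname": "root"}
	}}}
}

# object.get walks the path and returns false if any segment is missing,
# which keeps the rules from erroring out on partial host-sensor data.
specs_file := object.get(example, ["data", "APIServerInfo", "specsFile"], false)

# json.filter keeps only the listed paths, so the alertObject attached to
# the produced message stays small.
filtered := json.filter(example, [
	"data/APIServerInfo/specsFile",
	"apiVersion",
	"kind",
	"metadata"
])
```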
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0095", - "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0096", - "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. 
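The ownership rules here and below all share the same two-clause `allowed_ownership` helper, and its first clause is easy to misread: when the host sensor cannot resolve ownership it reports an `error` field instead of `username`/`groupname`, and that clause makes the helper succeed so the rule stays silent rather than raising a false alert on missing data. A minimal sketch with two hypothetical inputs:

```
package ownership_sketch

# Ownership as the host sensor reports it when it can be resolved...
reported := {"username": "root", "groupname": "root"}

# ...and when it cannot (an error marker instead of user/group names).
unresolved := {"error": "could not determine ownership"}

# Same two-clause helper as in the rules above: succeed on an error marker
# (no alert on missing data), or when user and group both match.
allowed_ownership(ownership, user, group) {
	ownership.error
}

allowed_ownership(ownership, user, group) {
	ownership.username == user
	ownership.groupname == group
}

# Both evaluate to true, so neither input would trigger a deny.
ok_resolved { allowed_ownership(reported, "root", "root") }
ok_unresolved { allowed_ownership(unresolved, "root", "root") }
```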
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0097", - "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0098", - "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various 
parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0099", - "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0100", - "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0101", - "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0102", - "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
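Unlike the single-file checks above, the two CNI rules read a whole array (`data.CNIConfigFiles`) and index it with `file_index`, so a single `CNIInfo` object can produce several findings, one per offending config file. A small sketch of that iteration over a hypothetical payload, using the illustrative bit test from the earlier permissions sketch in place of `cautils.unix_permissions_allow`:

```
package cni_sketch

# Hypothetical CNIInfo payload with two config files; the field names follow
# the paths used by the CNI rules above, the file paths are made up.
example := {"data": {"CNIConfigFiles": [
	{"path": "/etc/cni/net.d/10-example.conflist", "permissions": 420},
	{"path": "/etc/cni/net.d/example-kubeconfig", "permissions": 384}
]}}

# One finding per file whose mode sets any bit outside 0o600 (decimal 384).
deny[msg] {
	file := example.data.CNIConfigFiles[file_index]
	forbidden := 127  # == 0o177, the bits a 0o600-or-tighter mode must not set
	bits.and(file.permissions, forbidden) != 0
	msg := sprintf("file %d (%s) is more permissive than 0600", [file_index, file.path])
}
```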
For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory has permissions of `755`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0103", - "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"etcd\"\n\tallowed_group := \"etcd\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0104", - "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, admin.conf has permissions of `600`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0105", - "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0106", - "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0107", - "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0108", - "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0109", - "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0110", - "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 
2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0111", - "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0112", - "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0113", - "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the API server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result 
{\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0114", - "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", - "description": "Do not use token based authentication.", - "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", - "default_value": "By default, `--token-auth-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not use token based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"static token based authentication is enabled (--token-auth-file is set)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0115", - "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", - "default_value": "By default, the `DenyServiceExternalIPs` admission plugin is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, the `DenyServiceExternalIPs` admission plugin is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled\",
\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_result(fixed_values, i)\n}\n\nget_result(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0116", - "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "description": "Enable certificate based kubelet authentication.", - "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, certificate-based kubelet authentication is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable certificate based kubelet authentication.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 
0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0117", - "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "description": "Verify kubelet's certificate before establishing connection.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verify kubelet's certificate before establishing connection.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0118", - "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not always authorize all requests.", - "long_description": "The API Server, can be configured to allow all requests. This mode should not be used on any production cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Only authorized requests will be served.", - "default_value": "By default, `AlwaysAllow` is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not always authorize all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": 
[obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0119", - "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, `Node` authorization is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed 
paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0120", - "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", - "description": "Turn on Role Based Access Control.", - "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. It is recommended to use the RBAC authorization mode.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", - "default_value": "By default, `RBAC` authorization is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Turn on Role Based Access Control.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", - "ruleQuery": 
"", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0121", - "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", - "description": "Limit the rate at which the API server accepts requests.", - "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "You need to carefully tune the limits as per your environment.", - "default_value": "By default, `EventRateLimit` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the rate at which the API server accepts requests.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune the limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0122", - "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", - "description": "Do not allow all requests.", - "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and does not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", - "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not allow all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0123", - "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", - "description": "Always pull images.", - "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. 
When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", - "default_value": "By default, `AlwaysPullImages` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Always pull images.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0124", - "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", - "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", - "default_value": "By default, `SecurityContextDeny` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny admission controller is not enabled. 
This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0125", - "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", - "description": "Automate service accounts management.", - "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "None.", - "default_value": "By default, `ServiceAccount` is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Automate service accounts management.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin ServiceAccount is disabled. 
This disables automated service account management\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0126", - "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", - "description": "Reject creating objects in a namespace that is undergoing termination.", - "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "None", - "default_value": "By default, `NamespaceLifecycle` is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Reject creating objects in a namespace that is undergoing termination.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin NamespaceLifecycle is disabled. 
This allows objects to be created in non-existent namespaces or in namespaces undergoing termination\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0127", - "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `NodeRestriction` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", 
[count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0128", - "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", - "description": "Do not disable the secure port.", - "long_description": "The secure port is used to serve https with authentication and authorization. If you disable it, no https traffic is served and all traffic is served unencrypted.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "You need to set the API Server up with the right TLS certificates.", - "default_value": "By default, port 6443 is used as the secure port.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not disable the secure port.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": 
[obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0129", - "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. 
This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0130", - "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0131", - "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "description": "Retain the logs for at least 30 days or as appropriate.", - "long_description": "Retaining logs for at least 30 days ensures that 
you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain the logs for at least 30 days or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", - 
"resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0132", - "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "description": "Retain 10 or an appropriate number of old log files.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain 10 or an appropriate number of old log files.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate 
value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0133", - "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0134", - "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", - "description": "Set global request timeout for API server requests as appropriate.", - "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--request-timeout` is set to 60 seconds.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Set global request timeout for API server requests as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0135", - "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", - "description": "Validate service account before validating token.", - "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. 
This is an example of a time-of-check to time-of-use security issue.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `--service-account-lookup` argument is set to `true`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Validate service account before validating token.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the --service-account-lookup argument is set to false\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == 
\"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0136", - "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-key-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the --service-account-key-file argument is not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0137", - "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": 
"C-0138", - "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0139", - "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0140", - "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key 
value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-cafile` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0141", - "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "description": "Encrypt etcd key-value store.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `--encryption-provider-config` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Encrypt etcd key-value store.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0142", - "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. 
Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no encryption provider is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" - } - ] - }, - { - "controlID": "C-0143", - "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic 
Ciphers", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make 
connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 
0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0144", - "name": "CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", - "description": "Activate garbage collector on pod termination, as appropriate.", - "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Activate garbage collector on pod termination, as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": 
[],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0145", - "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0146", - "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "description": "Use individual service account credentials for each controller.", - "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", - "default_value": "By default, `--use-service-account-credentials` is set to false.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Use individual service account credentials for each controller.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": 
{\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0147", - "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-private-key-file` it not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` it not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == 
\"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0148", - "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "You need to setup and maintain root certificate authority file.", - "default_value": "By default, `--root-ca-file` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", - "ruleQuery": "", 
- "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0149", - "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation on controller-manager.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable kubelet server certificate rotation on controller-manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0150", - "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = 
sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - } - ] - }, - { - "controlID": "C-0151", - "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := 
sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - } - ] - }, - { - "controlID": "C-0152", - "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - } - ] - }, - { - "controlID": "C-0153", - "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", - "description": "Configure TLS encryption for the etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Client connections only over TLS would be served.", - "default_value": "By default, TLS encryption is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "etcd-tls-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Configure TLS encryption for the etcd service.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = 
invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0154", - "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", - "description": "Enable client authentication on etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", - "default_value": "By default, the etcd service can be queried by unauthenticated clients.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "etcd-client-auth-cert", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Enable client authentication on etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = 
input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0155", - "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", - "description": "Do not use self-signed certificates for TLS.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. 
You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", - "default_value": "By default, `--auto-tls` is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "etcd-auto-tls-disabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use self-signed certificates for TLS.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0156", - "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "etcd-peer-tls-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0157", - "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", - "description": "etcd should be configured for peer authentication.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", - "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, `--peer-client-cert-auth` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "etcd-peer-client-auth-cert", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured for peer authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0158", - "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. 
Hence, do not use self-signed certificates for authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "etcd-peer-auto-tls-disabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. 
Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - } - ] - }, - { - "controlID": "C-0159", - "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
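(Before the dedicated-CA control below, one note on the peer-auto-tls rule above: its single `invalid_flag` clause only fires on an explicit `--peer-auto-tls=true`, since the etcd default is already `false`. A minimal sketch, assuming it is evaluated together with that rule; the command array is invented.)

```
package armo_builtins

# Illustrative only: the clause fires on an explicit "--peer-auto-tls=true"
# and proposes rewriting that argument to "--peer-auto-tls=false";
# a command that omits the flag entirely is left alone.
example_flagged := invalid_flag(["etcd", "--peer-auto-tls=true"])
```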
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", - "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", - "default_value": "By default, no etcd certificate is created and used.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "etcd-unique-ca", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := regex.split(\"=\", command)\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0160", - "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", - "remediation": "Create an audit policy file for your cluster.", - "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating too large volumes of log information as this could impact the available of the cluster nodes.", - "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-native-cis", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - } - ] - }, - { - "controlID": "C-0161", - "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", - "description": "Ensure that the audit policy created for the cluster covers key security concerns.", - "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", - "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", - "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas :-\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", - "default_value": "By default Kubernetes clusters do not log audit information.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "audit-policy-content", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. 
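(As a rough illustration of the coverage the manual test above asks for, and that the `audit-policy-content` rule below checks mechanically: a `rules` value of the shape produced by `yaml.unmarshal` of an audit policy file. The API groups and the `RequestResponse` level here are example choices, not requirements.)

```
package armo_builtins

# Illustrative only: an audit-policy "rules" value that covers the areas the
# checks below look for - Metadata level for secrets/configmaps/tokenreviews
# and a non-None level for the workload resources.
example_rules := [
	{"level": "Metadata", "resources": [{"group": "", "resources": ["secrets", "configmaps"]}]},
	{"level": "Metadata", "resources": [{"group": "authentication.k8s.io", "resources": ["tokenreviews"]}]},
	{"level": "RequestResponse", "resources": [{"group": "", "resources": ["pods", "pods/exec", "pods/portforward", "pods/proxy", "services/proxy"]}]},
	{"level": "RequestResponse", "resources": [{"group": "apps", "resources": ["deployments"]}]}
]
```

A policy shaped like this logs only Metadata for secret-bearing resources while still recording workload changes and exec/port-forward/proxy activity, so the rule below would not raise a finding for it.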
The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n#rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit levels\n\tvalid_rules 
:= [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" - } - ] - }, - { - "controlID": "C-0162", - "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kubelet` service file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0163", - "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
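(The permission rules in this group delegate the actual mode comparison to `cautils.unix_permissions_allow`, which is not part of this patch. The sketch below is illustrative only: it captures the intended "no permission bit beyond the allowed mode" behaviour, but the real helper in `data.cautils` may be implemented differently.)

```
package cautils

# Illustrative sketch only - the real cautils helper may differ.
# Accept the file mode when it grants nothing beyond the allowed bits,
# e.g. max_allowed 384 (0o600) accepts 0o600 and 0o400 but rejects 0o640/0o644.
unix_permissions_allow(max_allowed, actual) {
	bits.and(actual, max_allowed) == actual
}
```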
For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0164", - "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, proxy file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0165", - "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `proxy` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0166", - "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
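(For orientation: the worker-node file checks here walk host-sensor objects rather than Kubernetes API objects. Below is a sketch of the `KubeletInfo` input consumed by the kubelet.conf rules that follow, with invented values; the field names mirror the `object.get` paths and ownership fields used in these rules.)

```
package armo_builtins

# Illustrative sketch only - an invented KubeletInfo host-sensor object of the
# shape walked by the kubelet.conf permission and ownership rules
# (object.get(obj, ["data", "kubeConfigFile"], false) plus the ownership helper).
# All concrete values below are made up for the example.
example_kubelet_info := {
	"apiVersion": "hostdata.kubescape.cloud/v1beta0",
	"kind": "KubeletInfo",
	"metadata": {"name": "worker-1"},
	"data": {"kubeConfigFile": {
		"path": "/etc/kubernetes/kubelet.conf",
		"permissions": 384, # 0o600, so the 600-or-more-restrictive check passes
		"ownership": {"username": "root", "groupname": "root"}
	}}
}
```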
For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file has permissions of `600`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0167", - "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U %G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0168", - "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0169", - "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0170", - "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - } - ] - }, - { - "controlID": "C-0171", - "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n```Verify that the ownership is set to `root:root`.\n\n```", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - } - ] - }, - { - "controlID": "C-0172", - "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot 
yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0173", - "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). 
Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet authorization mode is set to AlwaysAllow\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet authorization mode is set to AlwaysAllow\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet authorization mode is set to AlwaysAllow\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0174", - "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := 
base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0175", - "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "By default, `--read-only-port` is set to `10255/TCP`. However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "read-only-port-enabled-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0176", - "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout 
argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change the value of the --streaming-connection-idle-timeout argument or, if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj}\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := 
yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0177", - "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "By default, `--protect-kernel-defaults` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0178", - "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. 
If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-ip-tables", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0179", - "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. 
Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", - "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", - "default_value": "By default, `--hostname-override` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-hostname-override", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - } - ] - }, - { - "controlID": "C-0180", - "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "By default, `--event-qps` argument is set to `5`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-event-qps", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set the --event-qps argument to an appropriate level or, if using a config file, set the eventRecordQPS property to a value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, 
\"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0181", - "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the Kubelets.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "validate-kubelet-tls-configuration-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := 
input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t#get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0182", - "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client 
certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates and thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet client certificate rotation is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-certificates", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --rotate-certificates argument is not set to false.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "controlID": "C-0183", - "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet server certificate rotation is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Set --feature-gates=RotateKubeletServerCertificate=true on the kubelet, or enable server certificate rotation by setting serverTLSBootstrap: true in the Kubelet config file or passing the --rotate-server-certificates parameter.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" - } - ] - }, - { - "controlID": "C-0184", - "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and 
weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "kubelet-strong-cryptographics-ciphers", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", - "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, 
\"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. 
Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "cluster-admin-role", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | 
apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": 
"subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-create-pod", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup 
= rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "automount-default-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": 
[ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "namespace-without-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := 
get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": 
\"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. 
The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-can-bind-escalate", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can or bind escalate roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# 
fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in 
api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "controlID": "C-0192", - "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", - "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", - "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", - "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", - "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.", - "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "controlID": "C-0193", - "name": "CIS-5.2.2 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of privileged containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0194", - "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0195", - "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be 
permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0196", - "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namesapces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - 
"ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n 
key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0197", - "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of conatiners with `.spec.allowPrivilegeEscalation`set to `true`.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0198", - "name": "CIS-5.2.7 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0199", - "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", - "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0200", - "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - 
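
Both rule variants treat a ValidatingWebhookConfiguration or MutatingWebhookConfiguration that contains at least one rule whose scope is not `Cluster` as evidence of an external, namespaced policy controller (`has_external_policy_control`); when such a webhook exists, the namespaces are not failed outright and the webhook is returned for manual review instead. A small sketch of that scope check, with a placeholder webhook name and the same assumptions as the earlier sketches:

    package armo_builtins

    # A webhook with a namespaced (non-"Cluster") rule scope is treated as an
    # external policy controller.
    test_namespaced_webhook_counts_as_external_control {
        inp := [{
            "kind": "ValidatingWebhookConfiguration",
            "metadata": {"name": "example-policy-webhook"},
            "webhooks": [{"rules": [{"scope": "Namespaced"}]}]
        }]
        has_external_policy_control(inp)
    }

    # A purely cluster-scoped webhook is not.
    test_cluster_scoped_webhook_is_not_counted {
        inp := [{
            "kind": "ValidatingWebhookConfiguration",
            "metadata": {"name": "example-policy-webhook"},
            "webhooks": [{"rules": [{"scope": "Cluster"}]}]
        }]
        not has_external_policy_control(inp)
    }
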
"long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, there are no restrictions on adding capabilities to containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0201", - "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilites in applications runnning on your cluster. Where a namespace contains applicaions which do not require any Linux capabities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-restricted-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0202", - "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", - "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", - "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0203", - "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", - "description": "Do not generally admit containers which make use of `hostPath` volumes.", - "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the 
underlying cluster node. The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a spefific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0204", - "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", - "description": "Do not generally permit containers which require the use of HostPorts.", - "long_description": "Host ports connect containers directly to the host's network. 
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the use of HostPorts.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - } - ] - }, - { - "controlID": "C-0205", - "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. 
As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "This will depend on the CNI plugin in use.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - } - ] - }, - { - "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. 
However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", - "default_value": "By default, network policies are not created.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
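As a complement to the internal-networking rule above, which flags namespaces with no NetworkPolicy at all, a common starting point is a per-namespace default-deny policy with explicit allow rules layered on top. An illustrative sketch with placeholder names:

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all        # placeholder name
  namespace: example-ns         # placeholder namespace
spec:
  podSelector: {}               # empty selector matches every pod in the namespace
  policyTypes:
    - Ingress
    - Egress                    # no ingress/egress rules are listed, so all traffic is denied
```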
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-secrets-in-env-var", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", 
[wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "CIS-5.4.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "external-secret-storage", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
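For CIS-5.4.1, the rule-secrets-in-env-var rule flags any env entry that uses valueFrom.secretKeyRef. An illustrative replacement, with placeholder names and image, mounts the Secret as a read-only file instead:

```
apiVersion: v1
kind: Pod
metadata:
  name: app                                  # placeholder
spec:
  containers:
    - name: app
      image: registry.example/app:1.0        # placeholder image
      volumeMounts:
        - name: creds
          mountPath: /etc/creds              # application reads the secret from files here
          readOnly: true
  volumes:
    - name: creds
      secret:
        secretName: app-credentials          # placeholder Secret name
```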
Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" - } - ] - }, - { - "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "By default, Kubernetes starts with two initial namespaces: 1. 
`default` - The default namespace for objects with no other namespace2. `kube-system` - The namespace for objects created by the Kubernetes system3. `kube-node-lease` - Namespace used for node heartbeats4. `kube-public` - Namespace used for public information in a cluster", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "list-all-namespaces", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", - "controlID": "C-0210", - "description": "Enable `docker/default` seccomp profile in your pod definitions.", - "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", - "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "manual_test": "Review the pod definitions in your cluster. 
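For CIS-5.4.2, the external-secret-storage rule inspects the API server's encryption provider configuration and looks for one of the KMS providers it recognizes (akeyless, azurekmsprovider, aws-encryption-provider). A hedged sketch of such a configuration; the provider name and socket path are placeholders and depend on the KMS plugin actually deployed:

```
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - kms:
          apiVersion: v2
          name: aws-encryption-provider                    # one of the providers the rule accepts
          endpoint: unix:///var/run/kmsplugin/socket.sock  # placeholder plugin socket
      - identity: {}                                       # fallback so previously stored data stays readable
```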
It should create a line as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "test": "Checks if seccomp profile is defined as type RuntimeDefault in security context of workload or container level", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", - "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "set-seccomp-profile-RuntimeDefault", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile as RuntimeDefault", - "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as 
RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" - } - ] - }, - { - "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
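For CIS-5.7.2, the set-seccomp-profile-RuntimeDefault rule accepts the profile at either the container or the pod level. A minimal sketch of the pod-level form, with placeholder names, which every container in the pod then inherits:

```
apiVersion: v1
kind: Pod
metadata:
  name: app                              # placeholder
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault               # inherited by all containers in the pod
  containers:
    - name: app
      image: registry.example/app:1.0    # placeholder image
```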
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN 
capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - }, - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. 
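Taken together, the rules attached to C-0211 (rule-privilege-escalation, immutable-container-filesystem, non-root-containers, drop-capability-netraw) translate into a small set of container securityContext fields. An illustrative sketch with placeholder names; adjust the values to what the workload actually needs:

```
apiVersion: v1
kind: Pod
metadata:
  name: app                                # placeholder
spec:
  containers:
    - name: app
      image: registry.example/app:1.0      # placeholder image
      securityContext:
        runAsNonRoot: true                 # non-root-containers
        allowPrivilegeEscalation: false    # non-root-containers
        readOnlyRootFilesystem: true       # immutable-container-filesystem
        privileged: false                  # rule-privilege-escalation
        capabilities:
          drop:
            - NET_RAW                      # drop-capability-netraw
```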
Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", 
\"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure 
you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - 
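For readers tracing the set-seLinuxOptions rule above (and the set-seccomp-profile rule that follows), the suggested fix path is assembled from the dot-joined container path, the container index, and the dot-joined field path. A minimal Rego sketch of what that expression evaluates to for the first container; the rule name `example_fix_path` is illustrative only and does not exist in these files:

```
package armo_builtins

# Illustrative only: evaluates the fix-path expression used by the
# set-seLinuxOptions rule above for container index 0.
example_fix_path = fix_path {
	path_to_containers := ["spec", "containers"]
	path_to_search := ["securityContext", "seLinuxOptions"]
	i := 0
	fix_path := sprintf("%s[%d].%s", [concat(".", path_to_containers), i, concat(".", path_to_search)])
	# fix_path == "spec.containers[0].securityContext.seLinuxOptions"
}
```

The same pattern, with one extra trailing index, produces the capabilities drop-list path suggested by the NET_RAW rule earlier in this hunk.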
"apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, 
path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" - }, - { - "name": "set-fsgroup-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-systctls-params", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.systctls is not set.", - "remediation": "Set securityContext.systctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - 
"v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "CIS-5.7.4 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-rbac-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path 
:= get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-core1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap", - "Endpoints", - "LimitRange", - "PersistentVolumeClaim", - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-core2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController", - "ResourceQuota", - "ServiceAccount", - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] 
{\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-other1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ControllerRevision" - ] - }, - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - }, - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - }, - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-other2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress", - "NetworkPolicy" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - }, - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", 
\"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "resources-event-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "events.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - "C-0101", - "C-0102", - "C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112", - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - 
"C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143", - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - "C-0149", - "C-0150", - "C-0151", - "C-0152", - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159", - "C-0160", - "C-0161", - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] -} \ No newline at end of file diff --git a/releaseDev/controls.json b/releaseDev/controls.json deleted file mode 100644 index e5cbc7b9c..000000000 --- a/releaseDev/controls.json +++ /dev/null @@ -1,6728 +0,0 @@ -[ - { - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "rulesNames": [ - "rule-allow-privilege-escalation" - ], - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "enforce-kubelet-client-tls-authentication-updated" - ], - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0100", - "name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0140", - "name": "Ensure that the API Server --etcd-cafile argument is set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-cafile` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "rulesNames": [ - "rule-can-list-get-secrets", - "rule-can-list-get-secrets-v1" - ], - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0164", - "name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and it exists with permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" - ], - "rulesNames": [ - "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, proxy file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0181", - "name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the Kubelets.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "validate-kubelet-tls-configuration-updated" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0192", - "name": "Ensure that the cluster has at least one active policy control mechanism in place", - "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. 
This could be the in-built Pod Security Admission controller, or a third party policy control system.", - "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", - "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the[required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", - "test": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-applied" - ], - "baseScore": 4, - "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", - "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "rulesNames": [ - "immutable-container-filesystem" - ], - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
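The Immutable container filesystem control above references the `immutable-container-filesystem` rule by name only; its body is not included in this hunk. As rough orientation, here is a minimal Rego sketch of the kind of check such a rule performs, written against the same `deny[msga]` / `fixPaths` conventions as the rules earlier in this file. It is an illustration limited to plain Pods, not the shipped rule:

```
package armo_builtins

# Illustrative sketch only, limited to plain Pods: flag containers that do not
# set securityContext.readOnlyRootFilesystem to true, using the same message
# shape as the rules above.
deny[msga] {
	pod := input[_]
	pod.kind == "Pod"
	container := pod.spec.containers[i]
	not read_only_root_fs(container)

	msga := {
		"alertMessage": sprintf("container: %v in pod: %v does not have a read-only root filesystem", [container.name, pod.metadata.name]),
		"packagename": "armo_builtins",
		"alertScore": 3,
		"failedPaths": [],
		"fixPaths": [{"path": sprintf("spec.containers[%d].securityContext.readOnlyRootFilesystem", [i]), "value": "true"}],
		"alertObject": {"k8sApiObjects": [pod]},
	}
}

read_only_root_fs(container) {
	container.securityContext.readOnlyRootFilesystem == true
}
```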
", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0144", - "name": "Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", - "description": "Activate garbage collector on pod termination, as appropriate.", - "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "rulesNames": [ - "internal-networking" - ], - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0170", - "name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" - ], - "rulesNames": [ - "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0143", - "name": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers" - ], - "baseScore": 5, - "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0199", - "name": "Minimize the admission of containers with the NET_RAW capability", - "description": "Do not generally permit containers with the potentially 
dangerous NET\\_RAW capability.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 6, - "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0200", - "name": "Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` to be set to anything other than an empty array.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-restricted-applied" - ], - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, there are no restrictions on adding capabilities to containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0152", - "name": "Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1" - ], - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "rulesNames": [ - "rule-access-dashboard", - "rule-access-dashboard-subject-v1", - "rule-access-dashboard-wl-v1" - ], - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0142", - "name": "Ensure that encryption providers are appropriately configured", - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-encryption-providers-are-appropriately-configured" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no encryption provider is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "Ensure that the CNI in use supports Network Policies", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. 
As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-cni-in-use-supports-network-policies" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "This will depend on the CNI plugin in use.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-can-create-pod" - ], - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Malicious admission controller (validating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. 
This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", - "rulesNames": [ - "list-all-validating-webhooks" - ], - "controlID": "C-0036", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0115", - "name": "Ensure that the API Server --DenyServiceExternalIPs is not set", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set" - ], - "baseScore": 4, - "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", - "default_value": "By default, the `DenyServiceExternalIPs` admission plugin is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "No impersonation", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations.
If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "rulesNames": [ - "rule-can-impersonate-users-groups", - "rule-can-impersonate-users-groups-v1" - ], - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0133", - "name": "Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0105", - "name": "Ensure that the admin.conf file ownership is set to root:root", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" - ], - "rulesNames": [ - "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0249", - "name": "Restrict untrusted workloads", - "description": "Restricting unstrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", - "long_description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review" - }, - "rulesNames": [ - "rule-manual" - ], - "baseScore": 5, - "impact_statement": "", - "default_value": "ACI is not a default component of the AKS", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0163", - "name": "Ensure that the kubelet service file ownership is set to root:root", - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "A potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "rulesNames": [ - "automount-service-account" - ], - "long_description": "When a service account token is automatically mounted into a POD, an attacker who compromises that POD can use the token to access the Kubernetes API with the permissions of the service account. Disabling automatic mounting limits this exposure.", - "test": "Check all service accounts on which automount is not disabled. Check all workloads for which neither they nor their service account disable automount.", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0254", - "name": "Enable audit Logs", - "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", - "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", - "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3.
Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-manual" - ], - "baseScore": 5, - "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", - "default_value": "By default, cluster control plane logs aren't sent to be Logged.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0203", - "name": "Minimize the admission of HostPath volumes", - "description": "Do not generally admit containers which make use of `hostPath` volumes.", - "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. 
The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 6, - "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside a container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy either AppArmor or SELinux, run as root or allow privilege escalation, or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "rulesNames": [ - "CVE-2022-0492" - ], - "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside a container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE is not exploitable. Also, the exploit is possible when the container runtime uses the cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with a vulnerable Kernel in order to damage the system.
This control identifies all the resources that don't deploy either AppArmor or SELinux, run as root or allow privilege escalation, or have corresponding dangerous capabilities.", - "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", - "controlID": "C-0086", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0229", - "name": "Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks" - ], - "baseScore": 8.0, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Workloads with Critical vulnerabilities exposed to external traffic", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", - "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outside traffic. If no fix is available, consider periodic restart of the POD to minimize the risk of persistent intrusion. Use the exception mechanism if you don't want to see this report again.", - "rulesNames": [ - "exposed-critical-pods" - ], - "long_description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic.
This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service assigned to them.", - "test": "This control enumerates external facing workloads, that have LoadBalancer or NodePort services and checks image vulnerability information to see if the image has critical vulnerabilities.", - "controlID": "C-0083", - "baseScore": 8.0, - "example": "@controls/examples/c83.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0114", - "name": "Ensure that the API Server --token-auth-file parameter is not set", - "description": "Do not use token based authentication.", - "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-token-auth-file-parameter-is-not-set" - ], - "baseScore": 8, - "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication could not be used.", - "default_value": "By default, `--token-auth-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Ensure that default service accounts are not actively used", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "automount-default-service-account", - "namespace-without-service-account" - ], - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0215", - "name": "Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - 
"https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-deny-hostipc" - ], - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-streaming-connection-idle-timeout" - ], - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0099", - "name": "Ensure that the etcd pod specification file ownership is set to root:root", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" - ], - "rulesNames": [ - "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0241", - "name": "Use Azure RBAC for Kubernetes Authorization.", - "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", - "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", - "remediation": "Set Azure RBAC as access system.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-azure-rbac-is-set" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in the default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in the default namespace for the user to review and approve.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pods-in-default-namespace", - "resources-rbac-in-default-namespace", - "resources-core1-in-default-namespace", - "resources-core2-in-default-namespace", - "resources-other1-in-default-namespace", - "resources-other2-in-default-namespace", - "resources-secret-in-default-namespace", - "resources-event-in-default-namespace" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0138", - "name": "Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0235", - "name": "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. 
Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive" - ], - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Forbidden Container Registries", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credentials can lead to cluster takeover. Attackers may abuse cloud account credentials or the IAM mechanism to access the cluster\u2019s management layer.", - "remediation": "Limit the registries from which you pull container images", - "rulesNames": [ - "rule-identify-blocklisted-image-registries" - ], - "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", - "test": "Checks the image from the pod spec; if the registry of the image is in the list of blocked registries, we raise an alert.", - "controlID": "C-0001", - "baseScore": 7.0, - "example": "@controls/examples/c001.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "rulesNames": [ - "container-image-repository" - ], - "long_description": "If attackers get access to the cluster, they can re-point Kubernetes to a compromised container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only.
User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0102", - "name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" - ], - "rulesNames": [ - "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory has permissions of `755`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
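For reference, this is a hedged sketch of the subPath usage that the CVE-2021-25741 control inspects; all names are illustrative. On affected versions, the mitigation described above amounts to removing the subPath/subPathExpr field or upgrading the kubelet.

```yaml
# Illustrative Pod using a subPath volume mount, the pattern this control
# looks for on vulnerable Kubernetes versions.
apiVersion: v1
kind: Pod
metadata:
  name: subpath-example
spec:
  containers:
    - name: app
      image: registry.example.com/team/app:1.0.0
      volumeMounts:
        - name: config
          mountPath: /etc/app/settings.conf
          subPath: settings.conf   # field flagged on affected versions
  volumes:
    - name: config
      configMap:
        name: app-config
```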
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "rulesNames": [ - "Symlink-Exchange-Can-Allow-Host-Filesystem-Access" - ], - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0222", - "name": "Minimize user access to Amazon ECR", - "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. 
You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-aws-policies-are-present" - ], - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "rulesNames": [ - "rule-can-delete-k8s-events", - "rule-can-delete-k8s-events-v1" - ], - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0147", - "name": "Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
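To illustrate the least-privilege remediation for the event-deletion control above (C-0031), here is a minimal sketch of a namespaced Role that grants read-only access to events and deliberately omits the delete and deletecollection verbs the control reports on; the role name and namespace are hypothetical.

```yaml
# Read-only access to events; no delete/deletecollection verbs,
# which are the permissions the event-deletion control flags.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: event-reader
  namespace: monitoring
rules:
  - apiGroups: ["", "events.k8s.io"]
    resources: ["events"]
    verbs: ["get", "list", "watch"]
```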
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate" - ], - "baseScore": 6, - "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-private-key-file` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "Attackers with the relevant RBAC permissions can use the \u201ckubectl port-forward\u201d command to establish direct communication with Pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit the \u201ckubectl port-forward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rulesNames": [ - "rule-can-portforward", - "rule-can-portforward-v1" - ], - "long_description": "Attackers who have the relevant RBAC permissions can open a backdoor communication channel directly to the sockets inside a target container using the \u201ckubectl port-forward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to port-forward into pods, i.e. whether they have access to the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposure to internet", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Workload Exposure" - ] - }, - { - "attackTrack": "", - "categories": [ - "" - ] - } - ] - }, - "description": "This control detects workloads that are exposed to the Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "rulesNames": [ - "exposure-to-internet" - ], - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0247", - "name": "Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "restrict-access-to-the-control-plane-endpoint" - ], - "baseScore": 8, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", - "default_value": "By default, Endpoint Private Access is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0248", - "name": "Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. 
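As a counterpoint to the NodePort/LoadBalancer/Ingress exposure that the internet-exposure control (C-0256) reports on, this is a minimal sketch of a ClusterIP Service that keeps a workload reachable only from inside the cluster; the names and ports are illustrative.

```yaml
# ClusterIP keeps the Service internal to the cluster; NodePort, LoadBalancer
# and Ingress are the exposure paths the internet-exposure control detects.
apiVersion: v1
kind: Service
metadata:
  name: internal-api
spec:
  type: ClusterIP
  selector:
    app: internal-api
  ports:
    - port: 80
      targetPort: 8080
```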
Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-clusters-are-created-with-private-nodes" - ], - "baseScore": 8, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0246", - "name": "Avoid use of system:masters group", - "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", - "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", - "remediation": "Remove the `system:masters` group from all users in the cluster.", - "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", - "references": [ - "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-manual" - ], - "baseScore": 8, - "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", - "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0239", - "name": "Prefer using dedicated AKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. 
Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-default-service-accounts-has-only-default-roles" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0153", - "name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", - "description": "Configure TLS encryption for the etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "etcd-tls-enabled" - ], - "baseScore": 8, - "impact_statement": "Client connections only over TLS would be served.", - "default_value": "By default, TLS encryption is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods that run as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", - "rulesNames": [ - "non-root-containers" - ], - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user ID greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0213", - "name": "Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. 
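A minimal sketch of the securityContext settings that the non-root containers control (C-0013) above checks for, assuming the application image can run as an unprivileged user; the UID/GID values and image are illustrative.

```yaml
# Runs the container as a non-root user and disables privilege escalation,
# matching what the non-root containers control verifies.
apiVersion: v1
kind: Pod
metadata:
  name: nonroot-example
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 3000
  containers:
    - name: app
      image: registry.example.com/team/app:1.0.0
      securityContext:
        allowPrivilegeEscalation: false
```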
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-deny-privileged-container" - ], - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "rulesNames": [ - "linux-hardening" - ], - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0132", - "name": "Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "description": "Retain 10 or an appropriate number of old log files.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set file size of 100 MB and the number of old log files to keep as 10, you would approximate have 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0244", - "name": "Ensure Kubernetes Secrets are encrypted", - "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. 
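Returning to the Linux hardening control (C-0055) above, here is a hedged sketch of a Pod that defines the seccomp and capabilities fields the control checks for; AppArmor or SELinux options could be added where the node OS supports them, and all names are illustrative.

```yaml
# Defines seccomp and Linux capabilities in the securityContext, two of the
# hardening mechanisms the Linux hardening control looks for.
apiVersion: v1
kind: Pod
metadata:
  name: hardened-example
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault   # default seccomp filtering for all containers
  containers:
    - name: app
      image: registry.example.com/team/app:1.0.0
      securityContext:
        capabilities:
          drop: ["ALL"]      # drop all capabilities, add back only what is needed
```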
Additionally, organizations have various options to closely manage encryption or encryption keys.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "secret-etcd-encryption-cloud" - ], - "baseScore": 6, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "automount-service-account" - ], - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0127", - "name": "Ensure that the admission control plugin NodeRestriction is set", - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
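To make the token-mounting recommendation of C-0190 above concrete, this sketch disables automatic service account token mounting at both the ServiceAccount and the Pod level; names are hypothetical, and workloads that genuinely need API access can opt back in explicitly.

```yaml
# Disable token automount on the ServiceAccount and the Pod; only workloads
# that must talk to the API server should re-enable it.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa
  namespace: default
automountServiceAccountToken: false
---
apiVersion: v1
kind: Pod
metadata:
  name: no-token-example
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false   # explicit opt-out at the Pod level
  containers:
    - name: app
      image: registry.example.com/team/app:1.0.0
```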
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-NodeRestriction-is-set" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `NodeRestriction` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "Verify that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-rotate-kubelet-server-certificate" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet server certificate rotation is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0117", - "name": "Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "description": "Verify kubelet's certificate before establishing connection.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0141", - "name": "Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "description": "Encrypt etcd key-value store.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `--encryption-provider-config` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0198", - "name": "Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
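For the `--encryption-provider-config` recommendation above (C-0141), this is a minimal sketch of an EncryptionConfiguration file the flag could point to. The key name is arbitrary and the key material is a placeholder; a real 32-byte key must be generated, base64-encoded and protected.

```yaml
# Minimal EncryptionConfiguration for encrypting Secrets at rest.
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <base64-encoded-32-byte-key>   # placeholder, generate your own
      - identity: {}   # allows reading data written before encryption was enabled
```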
Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-restricted-applied" - ], - "baseScore": 6, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0093", - "name": "Ensure that the API server pod specification file ownership is set to root:root", - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" - ], - "rulesNames": [ - "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0169", - "name": "Ensure that the client certificate authorities file ownership is set to root:root", - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. 
The file should be owned by `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" - ], - "rulesNames": [ - "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default no `--client-ca-file` is specified.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0123", - "name": "Ensure that the admission control plugin AlwaysPullImages is set", - "description": "Always pull images.", - "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set" - ], - "baseScore": 4, - "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", - "default_value": "By default, `AlwaysPullImages` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0116", - "name": "Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "description": "Enable certificate based kubelet authentication.", - "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. 
You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate" - ], - "baseScore": 7, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, certificate-based kubelet authentication is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0193", - "name": "Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 8, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of privileged containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. 
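Several of the admission controls above (for example the privileged-container and root-container policies) can be enforced with Pod Security Admission. The following hedged sketch labels a namespace to enforce the baseline profile and warn on restricted; the namespace name is a placeholder.

```yaml
# Namespace labels enforcing the Pod Security "baseline" profile, which rejects
# privileged containers; "restricted" additionally requires non-root users.
apiVersion: v1
kind: Namespace
metadata:
  name: team-workloads
  labels:
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/warn: restricted
```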
Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "rulesNames": [ - "K8s common labels usage" - ], - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0148", - "name": "Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could be a subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate" - ], - "baseScore": 7, - "impact_statement": "You need to setup and maintain root certificate authority file.", - "default_value": "By default, `--root-ca-file` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0204", - "name": "Minimize the admission of containers which use HostPorts", - "description": "Do not generally permit containers which require the use of HostPorts.", - "long_description": "Host ports connect containers directly to the host's network. 
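As an example of the app.kubernetes.io/* labels that the common-labels control (C-0077) above looks for, here is a sketch of a Deployment carrying the recommended set; all values are illustrative.

```yaml
# Recommended Kubernetes common labels applied to a workload.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: payments-api
  labels:
    app.kubernetes.io/name: payments-api
    app.kubernetes.io/instance: payments-api-prod
    app.kubernetes.io/version: "1.4.2"
    app.kubernetes.io/component: api
    app.kubernetes.io/part-of: payments
    app.kubernetes.io/managed-by: helm
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: payments-api
  template:
    metadata:
      labels:
        app.kubernetes.io/name: payments-api
    spec:
      containers:
        - name: api
          image: registry.example.com/payments/api:1.4.2
```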
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 4, - "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the use of HostPorts.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "rulesNames": [ - "host-pid-ipc-privileges" - ], - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
This control identifies all PODs using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0245", - "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "encrypt-traffic-to-https-load-balancers-with-tls-certificates" - ], - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "ServiceAccount token mounted", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", - "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", - "rulesNames": [ - "serviceaccount-token-mount" - ], - "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", - "controlID": "C-0261", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0113", - "name": "Ensure that the API Server --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the API server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. 
However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false" - ], - "baseScore": 8, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Workload with ConfigMap access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Access" - ] - } - ] - }, - "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these ConfigMaps. Remove ConfigMap access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "rulesNames": [ - "workload-mounted-configmap" - ], - "test": "Check if any workload has mounted ConfigMaps by inspecting their specifications and verifying if ConfigMap volumes are defined", - "controlID": "C-0258", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting a host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "rulesNames": [ - "alert-rw-hostpath" - ], - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
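As a sketch of the remediation when a hostPath mount genuinely cannot be avoided, marking the mount read-only avoids the writable case this control alerts on (names and paths below are illustrative):

```
apiVersion: v1
kind: Pod
metadata:
  name: log-reader              # illustrative name
spec:
  containers:
  - name: reader
    image: busybox              # illustrative image
    volumeMounts:
    - name: host-logs
      mountPath: /host/logs
      readOnly: true            # readOnly == false (or missing) is what raises the alert
  volumes:
  - name: host-logs
    hostPath:
      path: /var/log            # illustrative host path
      type: Directory
```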
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Check in the POD spec if there is a hostPath volume; if it has the section mount.readOnly == false (or the field doesn\u2019t exist), we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0118", - "name": "Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not always authorize all requests.", - "long_description": "The API Server can be configured to allow all requests. This mode should not be used on any production cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow" - ], - "baseScore": 7, - "impact_statement": "Only authorized requests will be served.", - "default_value": "By default, `AlwaysAllow` is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0233", - "name": "Consider Fargate for running untrusted workloads", - "description": "It is best practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", - "long_description": "", - "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. 
If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * For Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "alert-fargate-not-in-use" - ], - "baseScore": 3, - "impact_statement": "", - "default_value": "By default, AWS Fargate is not utilized.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Mount service principal", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credentials.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mounts to known cloud credential folders or files.", - "rulesNames": [ - "alert-mount-potential-credentials-paths" - ], - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credentials.", - "test": "Check which workloads have volumes with potential access to known cloud credential folders or files on the node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0214", - "name": "Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
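For illustration only (PodSecurityPolicy was removed in Kubernetes 1.25 and is shown here because this control targets PSP-based clusters), a minimal PSP that does not permit the host PID namespace might look like the following; the name is a placeholder:

```
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: no-host-pid             # placeholder name
spec:
  hostPID: false                # the field this control checks; omitting it has the same effect
  privileged: false
  seLinux:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
  - '*'
```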
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-deny-hostpid" - ], - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0243", - "name": "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider" - ], - "baseScore": 5, - "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. 
Or, a network issue might prevent you from connecting to the registry.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0250", - "name": "Minimize cluster access to read-only for Azure Container Registry (ACR)", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", - "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-service-principle-has-read-only-permissions" - ], - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-can-list-get-secrets-v1" - ], - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0135", - "name": "Ensure that the API Server --service-account-lookup argument is set to true", - "description": "Validate service account before validating token.", - 
"long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. This is an example of time of check to time of use security issue.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `--service-account-lookup` argument is set to `true`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). 
Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-authorization-mode-alwaysAllow" - ], - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "rulesNames": [ - "internal-networking" - ], - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0225", - "name": "Prefer using dedicated EKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. 
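For illustration, associating an IAM role with a dedicated service account is done with the eks.amazonaws.com/role-arn annotation; the account ID, role and object names below are placeholders:

```
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-irsa                # placeholder service account name
  namespace: prod               # placeholder namespace
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/app-s3-reader  # placeholder ARN
```

Pods that reference this account via spec.serviceAccountName receive credentials scoped to that IAM role only, rather than inheriting the node role.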
The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally, ensure that the automountServiceAccountToken: false setting is in place for each default service account.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", - "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-default-service-accounts-has-only-default-roles", - "automount-default-service-account" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using the \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit the \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rulesNames": [ - "exec-into-container", - "exec-into-container-v1" - ], - "long_description": "Attackers who have permissions can run malicious commands in containers in the cluster using the exec command (\u201ckubectl exec\u201d). 
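For reference, the permission this control looks for is the create verb on the pods/exec subresource; an RBAC role granting it looks roughly like this (names are placeholders):

```
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: debug-exec              # placeholder name
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods/exec"]      # subjects bound to this role can run kubectl exec
  verbs: ["create"]
```

Bindings to roles like this one are the kind of grants the exec-into-container rules surface; restricting them to break-glass accounts keeps daily-operation subjects out of the report.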
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CoreDNS poisoning", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral Movement" - ], - "controlTypeTags": [ - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", - "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", - "rulesNames": [ - "rule-can-update-configmap", - "rule-can-update-configmap-v1" - ], - "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", - "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", - "controlID": "C-0037", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "rulesNames": [ - "sudo-in-container-entrypoint" - ], - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0134", - "name": "Ensure that the API Server --request-timeout argument is set as appropriate", - "description": "Set global request timeout for API server requests as appropriate.", - "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example,\n\n \n```\n--request-timeout=300s\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--request-timeout` is set to 60 seconds.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0151", - "name": "Ensure that the Scheduler --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-scheduler-profiling-argument-is-set-to-false" - ], - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0157", - "name": "Ensure that the --peer-client-cert-auth argument is set to true", - "description": "etcd should be configured for peer authentication.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", - "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "etcd-peer-client-auth-cert" - ], - "baseScore": 7, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0101", - "name": "Ensure that the Container Network Interface file ownership is set to root:root", - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "SSH server running inside container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "rulesNames": [ - "rule-can-ssh-to-pod", - "rule-can-ssh-to-pod-v1" - ], - "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "rulesNames": [ - "nginx-ingress-snippet-annotation-vulnerability" - ], - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. 
The file should be owned by root:root.", - "remediation": "Run the following command (using the config file location identified in the Audit step).\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" - ], - "rulesNames": [ - "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0124", - "name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", - "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used" - ], - "baseScore": 4, - "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies", - "default_value": "By default, `SecurityContextDeny` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
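As a minimal sketch of the remediation, wrapping the same container in a Deployment gives the POD a parental object that Kubernetes reschedules on failure (names and image are placeholders):

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web                     # placeholder name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: web
        image: nginx            # placeholder image
```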
This control identifies every POD that does not have a corresponding parental object.", - "remediation": "Create the necessary Deployment object for every POD, making every POD a first-class citizen in your IaC architecture.", - "rulesNames": [ - "naked-pods" - ], - "long_description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "test": "Test if PODs are not associated with Deployment, ReplicaSet etc. If not, fail.", - "controlID": "C-0073", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0150", - "name": "Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "long_description": "The Controller Manager API service, which runs on port 10252/TCP by default, is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1" - ], - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno that enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update Kyverno to version 1.8.5 or above", - "rulesNames": [ - "CVE-2022-47633" - ], - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno that enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution, and could therefore be bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification step. This vulnerability was fixed in Kyverno 1.8.5. 
This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0194", - "name": "Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Access container service account", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with the KubeAPI server. All PODs with an SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", - "rulesNames": [ - "access-container-service-account", - "access-container-service-account-v1" - ], - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
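A minimal sketch of the remediation for workloads that never call the Kubernetes API is to opt out of token mounting at the pod (or ServiceAccount) level; the names below are placeholders:

```
apiVersion: v1
kind: Pod
metadata:
  name: batch-worker            # placeholder name
spec:
  automountServiceAccountToken: false   # no SA token is mounted under /var/run/secrets/kubernetes.io/serviceaccount
  containers:
  - name: worker
    image: busybox              # placeholder image
    command: ["sleep", "3600"]
```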
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Instance Metadata API", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "rulesNames": [ - "instance-metadata-api-access" - ], - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0103", - "name": "Ensure that the etcd data directory ownership is set to etcd:etcd", - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", - "controlID": "C-0210", - "description": "Enable `docker/default` seccomp profile in your pod definitions.", - "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", - "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "manual_test": "Review the pod definitions in your cluster. They should include lines such as the below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "test": "Checks if the seccomp profile is defined as type RuntimeDefault in the security context at the workload or container level", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "set-seccomp-profile-RuntimeDefault" - ], - "baseScore": 4, - "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", - "default_value": "By default, seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-39328-grafana-auth-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "CVE-2022-39328 is a critical vulnerability in Grafana that might enable an attacker to access unauthorized endpoints under heavy load.", - "remediation": "Update your Grafana to 9.2.4 or above", - "rulesNames": [ - "CVE-2022-39328" - ], - "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. 
The CVSS score for this vulnerability is 9.8 Critical.", - "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", - "controlID": "C-0090", - "baseScore": 9.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Resources CPU limit and request", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use the exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-cpu-limit-and-request" - ], - "controlID": "C-0050", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "rulesNames": [ - "CVE-2022-0185" - ], - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 and below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible, replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespace in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-list-all-cluster-admins-v1" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0104", - "name": "Ensure that the admin.conf file permissions are set to 600", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains a private key and the respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" - ], - "rulesNames": [ - "ensure-that-the-admin.conf-file-permissions-are-set-to-600" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, admin.conf has permissions of `600`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0251", - "name": "Minimize user access to Azure Container Registry (ACR)", - "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. 
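To illustrate the remediation for C-0187 (Minimize wildcard use in Roles and ClusterRoles) above, a minimal sketch of a namespaced Role that grants explicit verbs on explicit resources instead of "*"; the name, namespace and resource list are placeholders, not part of the control itself.

```
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader          # illustrative name
  namespace: default        # illustrative namespace
rules:
  # Explicit apiGroups, resources and verbs instead of the wildcard "*"
  - apiGroups: [""]
    resources: ["pods", "pods/log"]
    verbs: ["get", "list", "watch"]
```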
For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "list-role-definitions-in-acr" - ], - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workloads with excessive amount of vulnerabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Container images with multiple Critical and High severity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threshold provided by the customer.", - "remediation": "Update your workload images as soon as possible when fixes become available.", - "rulesNames": [ - "excessive_amount_of_vulnerabilities_pods" - ], - "long_description": "Container images with multiple Critical and High severity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threshold provided by the customer.", - "test": "This control enumerates workloads and checks if they have an excessive amount of vulnerabilities in their container images. The threshold of \u201cexcessive number\u201d is configurable.", - "controlID": "C-0085", - "baseScore": 6.0, - "example": "@controls/examples/c85.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0226", - "name": "Prefer using a container-optimized OS when possible", - "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", - "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like a locked-down firewall are configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", - "remediation": "", - "manual_test": "If a container-optimized OS is required, examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", - "references": [ - "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", - "https://aws.amazon.com/bottlerocket/" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "alert-container-optimized-os-not-in-use" - ], - "baseScore": 3, - "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. 
ssh) may not be possible, with access and debugging being intended via a management tool.", - "default_value": "A container-optimized OS is not the default.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "rulesNames": [ - "resource-policies" - ], - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "rulesNames": [ - "rule-credentials-in-env-var", - "rule-credentials-configmap" - ], - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
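As an illustration of the remediation for C-0009 (Resource limits) above, a minimal sketch of a pod with both CPU and memory requests and limits set on the container; the name, image and values are placeholders, not recommendations.

```
apiVersion: v1
kind: Pod
metadata:
  name: limited-pod            # illustrative name
spec:
  containers:
    - name: app
      image: nginx             # illustrative image
      resources:
        requests:
          cpu: 100m            # placeholder values, tune per workload
          memory: 128Mi
        limits:
          cpu: 500m
          memory: 256Mi
```

A namespace-level LimitRange, as mentioned in the control, can be used to apply defaults so containers without explicit limits still receive them.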
Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0221", - "name": "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", - "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "To utilize AWS ECR for Image scanning, please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console:\n\n 1. Open the Amazon ECR console.\n2. From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. On the Images page, select the image to scan and then choose Scan.", - "manual_test": "Please follow AWS ECS or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-image-scanning-enabled-cloud" - ], - "baseScore": 5, - "impact_statement": "If you are utilizing AWS ECR, the following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageError: You may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returned: You may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", - "default_value": "Images are not scanned by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. 
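To illustrate the remediation for C-0012 (Applications credentials in configuration files) above, a minimal sketch that keeps a credential in a Kubernetes Secret and injects it via `secretKeyRef` instead of a plain environment variable; the Secret name, key and value are hypothetical.

```
apiVersion: v1
kind: Secret
metadata:
  name: db-credentials          # hypothetical Secret name
type: Opaque
stringData:
  password: "change-me"         # placeholder value
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
    - name: app
      image: nginx              # illustrative image
      env:
        - name: DB_PASSWORD
          valueFrom:
            secretKeyRef:       # credential is referenced, not hard-coded in the pod spec
              name: db-credentials
              key: password
```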
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-event-qps" - ], - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "By default, `--event-qps` argument is set to `5`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0129", - "name": "Ensure that the API Server --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
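The remediation for C-0180 above mentions setting `eventRecordQPS` in the Kubelet config file without showing the line itself; a minimal sketch of the relevant fragment of a `KubeletConfiguration`, with the value chosen purely as an example.

```
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Rate limit for event recording; 0 means unlimited, which may expose the
# kubelet to a denial of service. The value below is an example, not a recommendation.
eventRecordQPS: 5
```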
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-profiling-argument-is-set-to-false" - ], - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0161", - "name": "Ensure that the audit policy covers key security concerns", - "description": "Ensure that the audit policy created for the cluster covers key security concerns.", - "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", - "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", - "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas:\n\n * Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "audit-policy-content" - ], - "baseScore": 5, - "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", - "default_value": "By default, Kubernetes clusters do not log audit information.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they cannot be deployed to the same node. This may prevent the second object from starting, even though Kubernetes will try to reschedule it on another node, provided there are available nodes with a sufficient amount of resources. Also, if the number of replicas of such a workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define an appropriate exception. 
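For C-0161 (audit policy covers key security concerns) above, a minimal sketch of an audit `Policy` that logs Secrets, ConfigMaps and TokenReviews at Metadata level only and captures the exec/port-forward/proxy subresources listed in the manual test; this is an assumption-laden starting point, not a complete policy.

```
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # Secrets, ConfigMaps and TokenReviews: Metadata only, to avoid logging sensitive payloads
  - level: Metadata
    resources:
      - group: ""
        resources: ["secrets", "configmaps"]
      - group: "authentication.k8s.io"
        resources: ["tokenreviews"]
  # Capture use of exec, port-forward and proxy subresources
  - level: Metadata
    resources:
      - group: ""
        resources: ["pods/exec", "pods/portforward", "pods/proxy", "services/proxy"]
  # Baseline: log everything else at Metadata level
  - level: Metadata
```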
Use NodePort / ClusterIP instead.", - "rulesNames": [ - "container-hostPort" - ], - "long_description": "This control identifies workloads (like pod, deployment, etc.) that contain a container with hostPort. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) whether hostPort is defined in the container.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0195", - "name": "Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0108", - "name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. 
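As an illustration of the remediation for C-0044 (Container hostPort) above, a minimal sketch of exposing a workload through a NodePort Service instead of a container `hostPort`; the Service name, selector and ports are illustrative only.

```
apiVersion: v1
kind: Service
metadata:
  name: web                 # illustrative name
spec:
  type: NodePort            # or ClusterIP if external node access is not required
  selector:
    app: web                # illustrative label selector matching the workload
  ports:
    - port: 80              # Service port
      targetPort: 8080      # container port; no hostPort needed in the pod spec
```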
For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" - ], - "rulesNames": [ - "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0165", - "name": "If proxy kubeconfig file exists ensure ownership is set to root:root", - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" - ], - "rulesNames": [ - "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `proxy` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "rulesNames": [ - "secret-etcd-encryption-cloud", - "etcd-encryption-native" - ], - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0139", - "name": "Ensure that the API Server --client-ca-file argument is set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0197", - "name": "Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. 
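For C-0066 (Secret/ETCD encryption enabled) above, a minimal sketch of an `EncryptionConfiguration` for a native Kubernetes cluster that encrypts Secrets at rest, assuming the file is passed to the API server via `--encryption-provider-config`; the key name and key material are placeholders, and managed clouds (EKS, GKE) expose this through provider settings instead.

```
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1                              # placeholder key name
              secret: <base64-encoded-32-byte-key>    # placeholder, generate your own
      - identity: {}                                  # fallback for reading still-unencrypted data
```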
The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-restricted-applied" - ], - "baseScore": 6, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on a contained process's ability to escalate privileges, within the context of the container.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubernetes control plane API is running with the non-secure port enabled, which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "rulesNames": [ - "insecure-port-flag" - ], - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend blocking them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0120", - "name": "Ensure that the API Server --authorization-mode argument includes RBAC", - "description": "Turn on Role Based Access Control.", - "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
It is recommended to use the RBAC authorization mode.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value that includes `RBAC`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC" - ], - "baseScore": 8, - "impact_statement": "When RBAC is enabled, you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", - "default_value": "By default, `RBAC` authorization is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0237", - "name": "Check if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "has-image-signature" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSP enables fine-grained authorization of pod creation, and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "rulesNames": [ - "psp-enabled-cloud", - "psp-enabled-native" - ], - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates, and extend authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - "references": [], - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true - }, - "rulesNames": [ - "verify-image-signature" - ], - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0216", - "name": "Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostNetwork is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-deny-hostnetwork" - ], - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workloads with RCE vulnerabilities exposed to external traffic", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their POD has either LoadBalancer or NodePort service.", - "remediation": "Either update the container image to fix the vulnerabilities (if such a fix is available) or reassess if this workload must be exposed to the outside traffic. 
If no fix is available, consider periodic restart of the POD to minimize the risk of persistent intrusion. Use the exception mechanism if you don't want to see this report again.", - "rulesNames": [ - "exposed-rce-pods" - ], - "long_description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their POD has either LoadBalancer or NodePort service.", - "test": "This control enumerates external facing workloads that have LoadBalancer or NodePort service and checks the image vulnerability information for the RCE vulnerability.", - "controlID": "C-0084", - "baseScore": 8.0, - "example": "@controls/examples/c84.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0158", - "name": "Ensure that the --peer-auto-tls argument is not set to true", - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "etcd-peer-auto-tls-disabled" - ], - "baseScore": 6, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "rulesNames": [ - "configured-readiness-probe" - ], - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
This control finds all the PODs where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "anonymous-requests-to-kubelet-service-updated" - ], - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0121", - "name": "Ensure that the admission control plugin EventRateLimit is set", - "description": "Limit the rate at which the API server accepts requests.", - "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. 
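As an illustration of C-0018 (Configured readiness probe) above, a minimal sketch of a container with an HTTP readiness probe; the path, port and timings are placeholders to adapt per application.

```
apiVersion: v1
kind: Pod
metadata:
  name: web                      # illustrative name
spec:
  containers:
    - name: app
      image: nginx               # illustrative image
      readinessProbe:
        httpGet:
          path: /healthz         # placeholder health endpoint
          port: 8080
        initialDelaySeconds: 5   # example timings, not recommendations
        periodSeconds: 10
```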
Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-EventRateLimit-is-set" - ], - "baseScore": 4, - "impact_statement": "You need to carefully tune in limits as per your environment.", - "default_value": "By default, `EventRateLimit` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0154", - "name": "Ensure that the --client-cert-auth argument is set to true", - "description": "Enable client authentication on etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "etcd-client-auth-cert" - ], - "baseScore": 8, - "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", - "default_value": "By default, the etcd service can be queried by unauthenticated clients.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0094", - "name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
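The remediation for C-0121 above refers to a configuration file passed via `--admission-control-config-file`; a minimal sketch of such a file with a single server-wide event rate limit, using placeholder qps/burst values.

```
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
  - name: EventRateLimit
    configuration:
      apiVersion: eventratelimit.admission.k8s.io/v1alpha1
      kind: Configuration
      limits:
        - type: Server       # one shared limit for the whole API server
          qps: 50            # placeholder values, tune per environment
          burst: 100
```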
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" - ], - "rulesNames": [ - "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0128", - "name": "Ensure that the API Server --secure-port argument is not set to 0", - "description": "Do not disable the secure port.", - "long_description": "The secure port is used to serve https with authentication and authorization. If you disable it, no https traffic is served and all traffic is served unencrypted.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0" - ], - "baseScore": 8, - "impact_statement": "You need to set the API Server up with the right TLS certificates.", - "default_value": "By default, port 6443 is used as the secure port.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0131", - "name": "Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "description": "Retain the logs for at least 30 days or as appropriate.", - "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. 
Set your audit log retention period to 30 days or as per your business requirements.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate" - ], - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Missing network policy", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Network" - ] - } - ] - }, - "description": "This control detects workloads that have no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have the necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "rulesNames": [ - "ensure_network_policy_configured_in_labels" - ], - "test": "Check that all workloads have a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0218", - "name": "Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
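For C-0260 (Missing network policy) above, a minimal sketch of a default-deny NetworkPolicy for a namespace, from which specific ingress/egress rules can then be selectively allowed; the namespace name is illustrative.

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-namespace      # illustrative namespace
spec:
  podSelector: {}              # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress                   # all traffic denied until explicitly allowed by other policies
```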
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-deny-root-container" - ], - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0231", - "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-https-loadbalancers-encrypted-with-tls-aws" - ], - "baseScore": 5.0, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0110", - "name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
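Alongside the PSP-based remediation for C-0218 above, a minimal sketch of a pod-level security context that such a non-root policy would admit; the UID and image are placeholders, and this complements rather than replaces the admission policy itself.

```
apiVersion: v1
kind: Pod
metadata:
  name: nonroot-pod                        # illustrative name
spec:
  securityContext:
    runAsNonRoot: true                     # reject images that would run as UID 0
    runAsUser: 10001                       # placeholder non-zero UID
  containers:
    - name: app
      image: registry.example.com/app:1.0  # illustrative image built to run as non-root
```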
For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" - ], - "rulesNames": [ - "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Containers mounting Docker socket", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", - "remediation": "Remove docker socket mount request or define an exception.", - "rulesNames": [ - "containers-mounting-docker-socket" - ], - "long_description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", - "test": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0168", - "name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" - ], - "rulesNames": [ - "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no `--client-ca-file` is specified.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0146", - "name": "Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "description": "Use individual service account credentials for each controller.", - "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true" - ], - "baseScore": 4, - "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. 
If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", - "default_value": "By default, `--use-service-account-credentials` is set to false.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0252", - "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling the public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network, then create a link between your virtual network and a new private DNS zone.", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled" - ], - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "rulesNames": [ - "anonymous-requests-to-kubelet-service-updated" - ], - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. 
If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "anonymous-access-enabled" - ], - "baseScore": 5, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "rulesNames": [ - "rule-list-all-cluster-admins", - "rule-list-all-cluster-admins-v1" - ], - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by KMS using AWS, Azure, or Akeyless vault", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "external-secret-storage" - ], - "baseScore": 5, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and their services are exposed externally.", - "remediation": "Consider blocking external interfaces or protecting them with appropriate security tools.", - "rulesNames": [ - "exposed-sensitive-interfaces", - "exposed-sensitive-interfaces-v1" - ], - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, Kubernetes dashboard) exists. Requires user configuration.", - "controlID": "C-0021", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "rulesNames": [ - "image-pull-policy-is-not-set-to-always" - ], - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. 
If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0217", - "name": "Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-deny-allowprivilegeescalation" - ], - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0149", - "name": "Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation on controller-manager.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. 
This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\"; this recommendation verifies that it has not been disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0125", - "name": "Ensure that the admission control plugin ServiceAccount is set", - "description": "Automate service account management.", - "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-ServiceAccount-is-set" - ], - "baseScore": 3, - "impact_statement": "None.", - "default_value": "By default, `ServiceAccount` is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allows a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators.", - "long_description": "The impersonate privilege allows a subject to impersonate other users, gaining their rights to the cluster. 
The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-can-bind-escalate", - "rule-can-impersonate-users-groups-v1" - ], - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0202", - "name": "Minimize the admission of Windows HostProcess Containers", - "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", - "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 7, - "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. 
Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "cluster-admin-role" - ], - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default, a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0162", - "name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kubelet` service file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace, all traffic will be allowed into and out of the pods in that namespace.", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "internal-networking" - ], - "baseScore": 4, - "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", - "default_value": "By default, network policies are not created.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs, or use the exceptions to prevent further notifications.", - "rulesNames": [ - "label-usage-for-resources" - ], - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. 
These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0111", - "name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" - ], - "rulesNames": [ - "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-protect-kernel-defaults" - ], - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "By default, `--protect-kernel-defaults` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-secrets-in-env-var" - ], - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Resources memory limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "rulesNames": [ - "resources-memory-limit-and-request" - ], - "controlID": "C-0004", - "example": "@controls/examples/c004.yaml", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "description": "The API server allows an aggregated API to redirect client traffic to any URL. 
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties.", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patches): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "rulesNames": [ - "CVE-2022-3172" - ], - "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties.", - "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", - "controlID": "C-0089", - "baseScore": 3.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Workload with secret access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Secret Access" - ] - } - ] - }, - "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "rulesNames": [ - "workload-mounted-secrets" - ], - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", - "controlID": "C-0255", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "rulesNames": [ - "insecure-capabilities" - ], - "long_description": "Giving insecure and unnecessary capabilities to a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0242", - "name": "Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. 
The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-hostile-multitenant-workloads" - ], - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "name": "Workload with credential access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "This control checks if workloads specifications have sensitive information in their environment variables.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "rulesNames": [ - "rule-credentials-in-env-var" - ], - "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", - "controlID": "C-0259", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0112", - "name": "Ensure that the Kubernetes PKI key file permissions are set to 600", - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" - ], - "rulesNames": [ - "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0230", - "name": "Ensure Network Policy is Enabled and set as appropriate", - "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", - "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. 
Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", - "remediation": "", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-network-policy-is-enabled-eks" - ], - "baseScore": 6.0, - "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0240", - "name": "Ensure Network Policy is Enabled and set as appropriate", - "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", - "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-cni-enabled-aks" - ], - "baseScore": 6, - "impact_statement": "Network Policy requires the Network Policy add-on. 
This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0136", - "name": "Ensure that the API Server --service-account-key-file argument is set as appropriate", - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate" - ], - "baseScore": 5, - "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-key-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "rulesNames": [ - "CVE-2022-24348" - ], - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. 
Exploiting it enables attackers to obtain sensitive information like credentials, secrets, and API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checks the version of the Argo CD deployment in the cluster; if it is vulnerable to CVE-2022-24348 it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0137", - "name": "Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate" - ], - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0160", - "name": "Ensure that a minimal audit policy is created", - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", - "remediation": "Create an audit policy file for your cluster.", - "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "k8s-audit-logs-enabled-native-cis" - ], - "baseScore": 5, - "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. 
Care should be taken to avoid generating excessively large volumes of log information, as this could impact the availability of the cluster nodes.", - "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0096", - "name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" - ], - "rulesNames": [ - "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-scheduler.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0145", - "name": "Ensure that the Controller Manager --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-controller-manager-profiling-argument-is-set-to-false" - ], - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0156", - "name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "etcd-peer-tls-enabled" - ], - "baseScore": 7, - "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. 
By default, peer communication over TLS is not configured.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0119", - "name": "Ensure that the API Server --authorization-mode argument includes Node", - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value that includes `Node`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-authorization-mode-argument-includes-Node" - ], - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, `Node` authorization is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0182", - "name": "Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-rotate-certificates" - ], - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet client certificate rotation is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Deprecated Kubernetes image registry", - "attributes": { - "armoBuiltin": true - }, - "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", - "rulesNames": [ - "rule-identify-old-k8s-registry" - ], - "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", - "controlID": "C-0253", - "baseScore": 5.0, - "example": "@controls/examples/c239.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-ip-tables" - ], - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0227", - "name": "Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. 
If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-endpointprivateaccess-is-enabled" - ], - "baseScore": 8.0, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", - "default_value": "By default, Endpoint Public Access is disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "rulesNames": [ - "ingress-and-egress-blocked" - ], - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0097", - "name": "Ensure that the scheduler pod specification file ownership is set to root:root", - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. 
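To make the C-0030 remediation ("define a network policy that restricts ingress and egress connections") concrete, here is a hedged default-deny sketch; the namespace name is an assumption, and explicit allow rules for required traffic still have to be added on top of it:

```yaml
# Default-deny: isolates every Pod in the selected namespace in both directions.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-app          # assumption: replace with the target namespace
spec:
  podSelector: {}            # empty selector matches all Pods in the namespace
  policyTypes:
    - Ingress
    - Egress
```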
The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" - ], - "rulesNames": [ - "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Data Destruction", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "rulesNames": [ - "rule-excessive-delete-rights", - "rule-excessive-delete-rights-v1" - ], - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "rulesNames": [ - "pods-in-default-namespace" - ], - "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the PODs running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. 
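For C-0007 (Data Destruction), the remediation asks to minimize the subjects that can delete resources. A hedged RBAC sketch of a read-only role (names and resource list are illustrative) shows what granting access without `delete`/`deletecollection` verbs looks like:

```yaml
# Read-only access to common workload objects; no delete or deletecollection verbs.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: workload-reader      # illustrative name
  namespace: my-app          # assumption: target namespace
rules:
  - apiGroups: [""]
    resources: ["pods", "configmaps", "persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets"]
    verbs: ["get", "list", "watch"]
```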
This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "rulesNames": [ - "host-network-access" - ], - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0159", - "name": "Ensure that a unique Certificate Authority is used for etcd", - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", - "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "etcd-unique-ca" - ], - "baseScore": 8, - "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", - "default_value": "By default, no etcd certificate is created and used.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0130", - "name": "Ensure that the API Server --audit-log-path argument is set", - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-api-server-audit-log-path-argument-is-set" - ], - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "rulesNames": [ - "alert-any-hostpath" - ], - "controlID": "C-0048", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "rulesNames": [ - "rule-privilege-escalation" - ], - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. 
Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "rule-privilege-escalation", - "immutable-container-filesystem", - "non-root-containers", - "drop-capability-netraw", - "set-seLinuxOptions", - "set-seccomp-profile", - "set-procmount-default", - "set-fsgroup-value", - "set-fsgroupchangepolicy-value", - "set-systctls-params", - "set-supplementalgroups-values" - ], - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0228", - "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
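As a hedged illustration of C-0211 (Apply Security Context to Your Pods and Containers), the sketch below sets a few of the fields that the listed rules check; the Pod name, image and exact values are assumptions to be adapted, not the control's mandated configuration:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: secured-app                          # illustrative name
spec:
  securityContext:
    runAsNonRoot: true                       # Pod level: refuse to run containers as root
    seccompProfile:
      type: RuntimeDefault                   # apply the runtime's default seccomp profile
  containers:
    - name: app
      image: registry.example.com/app:1.0    # assumption: your image
      securityContext:
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
        capabilities:
          drop: ["ALL"]                      # drop all Linux capabilities, including NET_RAW
```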
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "Check for private endpoint access to the Kubernetes API server", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks" - ], - "baseScore": 8.0, - "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", - "default_value": "By default, the Public Endpoint is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0220", - "name": "Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-required-drop-capabilities" - ], - "baseScore": 5.0, - "impact_statement": "Pods with containers that require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0167", - "name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" - ], - "rulesNames": [ - "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Configured liveness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "rulesNames": [ - "configured-liveness-probe" - ], - "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. 
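For C-0056 (Configured liveness probe), a hedged sketch of a container with a liveness probe follows; the health endpoint, port and timings are assumptions to be tuned per workload:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probed-app                           # illustrative name
spec:
  containers:
    - name: web
      image: registry.example.com/web:1.0    # assumption: your image
      livenessProbe:
        httpGet:
          path: /healthz                     # assumed health endpoint
          port: 8080
        initialDelaySeconds: 10
        periodSeconds: 15                    # kubelet restarts the container on repeated failures
```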
This control finds all the PODs where the Liveness probe is not configured.", - "controlID": "C-0056", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "list-all-namespaces" - ], - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "By default, Kubernetes starts with two initial namespaces: 1. `default` - The default namespace for objects with no other namespace2. `kube-system` - The namespace for objects created by the Kubernetes system3. `kube-node-lease` - Namespace used for node heartbeats4. `kube-public` - Namespace used for public information in a cluster", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0095", - "name": "Ensure that the controller manager pod specification file ownership is set to root:root", - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" - ], - "rulesNames": [ - "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0122", - "name": "Ensure that the admission control plugin AlwaysAdmit is not set", - "description": "Do not allow all requests.", - "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and do not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. Its behavior was equivalent to turning off all admission controllers.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set" - ], - "baseScore": 8, - "impact_statement": "Only requests explicitly allowed by the admissions control plugins would be served.", - "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0107", - "name": "Ensure that the scheduler.conf file ownership is set to root:root", - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" - ], - "rulesNames": [ - "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0098", - "name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API objects. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" - ], - "rulesNames": [ - "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0196", - "name": "Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, and ensure that each policy disallows the admission of `hostNetwork` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-baseline-applied" - ], - "baseScore": 
5, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation", - "Impact - Data access in container" - ] - } - ] - }, - "description": "CVE-2022-23648 is a vulnerability in containerd that enables an attacker to gain access to read-only copies of arbitrary files from the host using a specially-crafted POD configuration YAML", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "rulesNames": [ - "CVE-2022-23648" - ], - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "Verify that the --read-only-port argument is set to 0", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "read-only-port-enabled-updated" - ], - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it be re-configured to use the main Kubelet API.", - "default_value": "By default, `--read-only-port` is set to `10255/TCP`. 
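The rule listed for C-0196 (`pod-security-admission-baseline-applied`) is typically satisfied by labelling user namespaces for Pod Security Admission; a hedged sketch (the namespace name and the optional warn label are assumptions):

```yaml
# The baseline level rejects Pods that request host namespaces, including hostNetwork: true.
apiVersion: v1
kind: Namespace
metadata:
  name: my-app                                    # assumption: target namespace
  labels:
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/warn: restricted   # optional: warn about anything below restricted
```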
However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "rulesNames": [ - "enforce-kubelet-client-tls-authentication-updated" - ], - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0184", - "name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-strong-cryptographics-ciphers" - ], - "baseScore": 5, - "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "rulesNames": [ - "k8s-audit-logs-enabled-cloud", - "k8s-audit-logs-enabled-native" - ], - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0166", - "name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" - ], - "rulesNames": [ - "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file has permissions of `600`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0109", - "name": "Ensure that the controller-manager.conf file ownership is set to root:root", - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" - ], - "rulesNames": [ - "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "RBAC enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access", - "Privilege escalation" - ] - } - ] - }, - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "rulesNames": [ - "rbac-enabled-cloud", - "rbac-enabled-native" - ], - "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0106", - "name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" - ], - "rulesNames": [ - "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Malicious admission controller (mutating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "rulesNames": [ - "list-all-mutating-webhooks" - ], - "controlID": "C-0039", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0201", - "name": "Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principal of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilites in applications runnning on your cluster. 
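To review the mutating admission controller control (C-0039) above by hand, listing the configurations is usually the starting point; the custom-columns output below is just one convenient shape, not part of the control itself.

```
# Enumerate every mutating webhook so each one can be verified or covered by an exception.
kubectl get mutatingwebhookconfigurations

# Optional: show which webhooks each configuration registers.
kubectl get mutatingwebhookconfigurations \
  -o custom-columns='NAME:.metadata.name,WEBHOOKS:.webhooks[*].name'
```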
Where a namespace contains applicaions which do not require any Linux capabities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "pod-security-admission-restricted-applied" - ], - "baseScore": 5, - "impact_statement": "Pods with containers require capabilities to operate will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0126", - "name": "Ensure that the admission control plugin NamespaceLifecycle is set", - "description": "Reject creating objects in a namespace that is undergoing termination.", - "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set" - ], - "baseScore": 3, - "impact_statement": "None", - "default_value": "By default, `NamespaceLifecycle` is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "rulesNames": [ - "rule-deny-cronjobs" - ], - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
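Since the capabilities-admission control above maps to the `pod-security-admission-restricted-applied` rule, one way to satisfy it on a namespace whose workloads need no extra Linux capabilities is to enforce the built-in "restricted" Pod Security Standard; `my-namespace` is a placeholder.

```
# Enforce the "restricted" Pod Security Standard on a namespace (placeholder name).
kubectl label --overwrite namespace my-namespace \
  pod-security.kubernetes.io/enforce=restricted \
  pod-security.kubernetes.io/enforce-version=latest
```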
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0155", - "name": "Ensure that the --auto-tls argument is not set to true", - "description": "Do not use self-signed certificates for TLS.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "etcd-auto-tls-disabled" - ], - "baseScore": 6, - "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", - "default_value": "By default, `--auto-tls` is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0092", - "name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" - ], - "rulesNames": [ - "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Workload with PVC access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Access" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. 
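For the CronJob control just described, the approval review it asks for can start from a simple listing such as the one below; the extra columns are purely illustrative.

```
# List every CronJob in the cluster so each schedule and image can be approved.
kubectl get cronjobs --all-namespaces

# Optional: include schedule and image for faster review.
kubectl get cronjobs -A -o custom-columns='NS:.metadata.namespace,NAME:.metadata.name,SCHEDULE:.spec.schedule,IMAGE:.spec.jobTemplate.spec.template.spec.containers[*].image'
```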
Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "rulesNames": [ - "workload-mounted-pvc" - ], - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0223", - "name": "Minimize cluster access to read-only for Amazon ECR", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", - "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", - "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure_nodeinstancerole_has_right_permissions_for_ecr" - ], - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0238", - "name": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. 
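A manual spot-check that approximates what the `workload-mounted-pvc` rule looks for could be done as below; it assumes `jq` is installed and only inspects pods, not the higher-level workload objects.

```
# List pods that mount a PersistentVolumeClaim (requires jq).
kubectl get pods -A -o json \
  | jq -r '.items[] | select(any(.spec.volumes[]?; has("persistentVolumeClaim")))
           | "\(.metadata.namespace)/\(.metadata.name)"'
```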
In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive" - ], - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0232", - "name": "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", - "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", - "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", - "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", - "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "review-roles-with-aws-iam-authenticator" - ], - "baseScore": 7, - "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
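For the kubeconfig-permissions control above, the two manual steps can be folded into one small sketch; the fallback path is the EKS default mentioned in the control and may differ on other distributions.

```
# Locate the kubeconfig the kubelet was started with and report its mode and owner.
KCFG=$(ps -ef | grep '[k]ubelet' | sed -n 's/.*--kubeconfig[= ]\([^ ]*\).*/\1/p' | head -n 1)
stat -c '%a %U:%G %n' "${KCFG:-/var/lib/kubelet/kubeconfig}"
```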
If they are not they will not be able to access the namespace or deploy.", - "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0234", - "name": "Consider external secret storage", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "manual_test": "Review your secrets management implementation.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "ensure-external-secrets-storage-is-in-use" - ], - "baseScore": 6.0, - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", - "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "kubelet-hostname-override" - ], - "baseScore": 3, - "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. 
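The external-secret-storage control above is hard to check generically; the heuristic below only looks for two common integrations (External Secrets Operator CRDs and the Secrets Store CSI driver), so the names are assumptions and an empty result does not by itself prove non-compliance.

```
# Heuristic: look for common external-secrets tooling in the cluster.
kubectl get crd | grep -iE 'externalsecret|secretproviderclass' \
  || echo "no external secret CRDs detected"
kubectl get csidrivers 2>/dev/null | grep -i 'secrets-store' || true
```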
In these environments, this recommendation should not apply.", - "default_value": "By default, `--hostname-override` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0219", - "name": "Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": { - "armoBuiltin": true - }, - "rulesNames": [ - "psp-deny-allowed-capabilities" - ], - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined. 
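For the added-capabilities control above, the manual PSP check can be made slightly more direct; note that PodSecurityPolicy is only served on clusters older than v1.25, so this sketch applies only where PSPs still exist.

```
# Print each PSP together with any allowedCapabilities; the column should be empty or <none>.
kubectl get psp -o custom-columns='NAME:.metadata.name,ALLOWED_CAPS:.spec.allowedCapabilities'
```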
If a PSP is created 'allowedCapabilities' is set by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } -] \ No newline at end of file diff --git a/releaseDev/default_config_inputs.json b/releaseDev/default_config_inputs.json deleted file mode 100644 index 3ad252704..000000000 --- a/releaseDev/default_config_inputs.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "name": "default", - "attributes": { - "armoBuiltin": true - }, - "scope": { - "designatorType": "attributes", - "attributes": {} - }, - "settings": { - "postureControlInputs": { - "imageRepositoryAllowList": [], - "trustedCosignPublicKeys": [], - "insecureCapabilities": [ - "SETPCAP", - "NET_ADMIN", - "NET_RAW", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PTRACE", - "SYS_ADMIN", - "SYS_BOOT", - "MAC_OVERRIDE", - "MAC_ADMIN", - "PERFMON", - "ALL", - "BPF" - ], - "listOfDangerousArtifacts": [ - "bin/bash", - "sbin/sh", - "bin/ksh", - "bin/tcsh", - "bin/zsh", - "usr/bin/scsh", - "bin/csh", - "bin/busybox", - "usr/bin/busybox" - ], - "publicRegistries": [], - "sensitiveInterfaces": [ - "nifi", - "argo-server", - "weave-scope-app", - "kubeflow", - "kubernetes-dashboard", - "jenkins", - "prometheus-deployment" - ], - "max_critical_vulnerabilities": [ - "5" - ], - "max_high_vulnerabilities": [ - "10" - ], - "sensitiveValuesAllowed": [ - "AllowedValue" - ], - "sensitiveKeyNames": [ - "aws_access_key_id", - "aws_secret_access_key", - "azure_batchai_storage_account", - "azure_batchai_storage_key", - "azure_batch_account", - "azure_batch_key", - "secret", - "key", - "password", - "pwd", - "token", - "jwt", - "bearer", - "credential" - ], - "sensitiveValues": [ - "BEGIN \\w+ PRIVATE KEY", - "PRIVATE KEY", - "eyJhbGciO", - "JWT", - "Bearer", - "_key_", - "_secret_" - ], - "servicesNames": [ - "nifi-service", - "argo-server", - "minio", - "postgres", - "workflow-controller-metrics", - "weave-scope-app", - "kubernetes-dashboard" - ], - "untrustedRegistries": [], - "memory_request_max": [], - "memory_request_min": [], - "memory_limit_max": [], - "memory_limit_min": [], - "cpu_request_max": [], - "cpu_request_min": [], - "cpu_limit_max": [], - "cpu_limit_min": [], - "wlKnownNames": [ - "coredns", - "kube-proxy", - "event-exporter-gke", - "kube-dns", - "17-default-backend", - "metrics-server", - "ca-audit", - "ca-dashboard-aggregator", - "ca-notification-server", - "ca-ocimage", - "ca-oracle", - "ca-posture", - "ca-rbac", - "ca-vuln-scan", - "ca-webhook", - "ca-websocket", - "clair-clair" - ], - "recommendedLabels": [ - "app", - "tier", - "phase", - "version", - "owner", - "env" - ], - "k8sRecommendedLabels": [ - "app.kubernetes.io/name", - "app.kubernetes.io/instance", - "app.kubernetes.io/version", - "app.kubernetes.io/component", - "app.kubernetes.io/part-of", - "app.kubernetes.io/managed-by", - "app.kubernetes.io/created-by" - ] - } - } -} \ No newline at end of file diff --git a/releaseDev/devopsbest.json b/releaseDev/devopsbest.json deleted file mode 100644 index ba2dff28e..000000000 --- a/releaseDev/devopsbest.json +++ /dev/null @@ -1,987 +0,0 @@ -{ - "name": "DevOpsBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Resources memory limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ], - "actionRequired": "configuration" - }, - 
"description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0004", - "example": "@controls/examples/c004.yaml", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-memory-limit-and-request", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.memory_request_max", - "settings.postureControlInputs.memory_request_min", - "settings.postureControlInputs.memory_limit_max", - "settings.postureControlInputs.memory_limit_min" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.memory_request_max", - "name": "memory_request_max", - "description": "Ensure memory max requests are set" - }, - { - "path": "settings.postureControlInputs.memory_request_min", - "name": "memory_request_min", - "description": "Ensure memory min requests are set" - }, - { - "path": "settings.postureControlInputs.memory_limit_max", - "name": "memory_limit_max", - "description": "Ensure memory max limits are set" - }, - { - "path": "settings.postureControlInputs.memory_limit_min", - "name": "memory_limit_min", - "description": "Ensure memory min limits are set" - } - ], - "description": "memory limits and requests are not set.", - "remediation": "Ensure memory limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not have container with memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nrequest_or_limit_memory(container) {\n\tcontainer.resources.limits.memory\n\tcontainer.resources.requests.memory\n}\n\n######################################################################################################\n\n# Fails if pod exceeds memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[wl]},\n\t}\n}\n\n######################################################################################################\n\nis_min_max_exceeded_memory(container) = \"resources.limits.memory\" {\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n} else = \"resouces.requests.memory\" {\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n} else = \"\" {\n\ttrue\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max :=data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - } - ] - }, - { - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
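Because the memory-limit control above and the readiness-probe control described here both come down to a few fields on the container spec, a single hedged manifest sketch may help; the image, port, probe path and namespace are placeholders, and the namespace is assumed to already exist.

```
# Illustrative Pod that satisfies both memory requests/limits and a readiness probe.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: probe-demo
  namespace: demo        # placeholder; must already exist
spec:
  containers:
    - name: web
      image: nginx:1.25   # placeholder image
      resources:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "256Mi"
          cpu: "250m"
      readinessProbe:
        httpGet:
          path: /
          port: 80
        initialDelaySeconds: 5
        periodSeconds: 10
EOF
```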
This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-readiness-probe", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Readiness probe is not configured", - "remediation": "Ensure Readiness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - 
"security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Resources CPU limit and request", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0050", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resources-cpu-limit-and-request", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.cpu_request_max", - "settings.postureControlInputs.cpu_request_min", - "settings.postureControlInputs.cpu_limit_min", - "settings.postureControlInputs.cpu_limit_max" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.cpu_request_max", - "name": "cpu_request_max", - "description": "Ensure CPU max requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_request_min", - "name": "cpu_request_min", - "description": "Ensure CPU min requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_max", - "name": "cpu_limit_max", - "description": "Ensure CPU max limits are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_min", - "name": "cpu_limit_min", - "description": "Ensure CPU min limits are set" - } - ], - "description": "CPU limits and requests are not set.", - "remediation": "Ensure CPU limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not have container with CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 
10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n###################################################################################################################\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v 
exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n \tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n#################################################################################################################3\n\nrequest_or_limit_cpu(container) {\n\tcontainer.resources.limits.cpu\n\tcontainer.resources.requests.cpu\n}\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resouces.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, 
\"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - } - ] - }, - { - "name": "Configured liveness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "controlID": "C-0056", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "configured-liveness-probe", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Liveness probe is not configured", - "remediation": "Ensure Liveness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the PODs running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - } - 
] - }, - { - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "remediation": "Create the necessary Deployment object for every POD, making every POD a first-class citizen in your IaC architecture.", - "long_description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "test": "Test whether PODs are associated with a Deployment, ReplicaSet, etc. If not, fail.", - "controlID": "C-0073", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "naked-pods", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", - "remediation": "Create the necessary Deployment object for every Pod, making every Pod a first-class citizen in your IaC architecture. Example command: kubectl create deployment nginx-depl --image=nginx:1.19", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" - } - ] - }, - { - "name": "Containers mounting Docker socket", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket to access the Docker runtime.", - "remediation": "Remove the Docker socket mount request or define an exception.", - "long_description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket to access the Docker runtime.", - "test": "Check hostpath. 
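The ownerReferences check behind the naked-pods rule is small enough to illustrate on its own. The sketch below uses a hypothetical package name and hypothetical sample Pods (none of it is taken from the shipped rule) and can be evaluated with opa eval:

    package example_naked_pods

    # Hypothetical samples: a Pod created directly, and a Pod created by a
    # ReplicaSet (the controller fills in metadata.ownerReferences).
    naked_pod := {"kind": "Pod", "metadata": {"name": "nginx"}}

    owned_pod := {
        "kind": "Pod",
        "metadata": {
            "name": "nginx-7c5ddbdf54-abcde",
            "ownerReferences": [{"kind": "ReplicaSet", "name": "nginx-7c5ddbdf54"}]
        }
    }

    # Mirrors the deny rule above: a Pod without ownerReferences is "naked".
    is_naked(pod) {
        pod.kind == "Pod"
        not pod.metadata.ownerReferences
    }

    naked_is_flagged { is_naked(naked_pod) }   # true
    owned_is_flagged { is_naked(owned_pod) }   # undefined, i.e. not flagged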
If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "containers-mounting-docker-socket", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n" - } - ] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. 
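The hostPath comparison used by containers-mounting-docker-socket reduces to two string equalities. A minimal, self-contained Rego sketch follows; the socket paths come from the rule above, while the package name, volume names and the contrasting /var/log/app mount are hypothetical:

    package example_docker_socket

    # One volume mounting the Docker socket, one harmless host mount for contrast.
    socket_volume := {"name": "docker-sock", "hostPath": {"path": "/var/run/docker.sock"}}
    plain_volume := {"name": "app-logs", "hostPath": {"path": "/var/log/app"}}

    # Mirrors is_docker_mounting: only the Docker socket / runtime paths match.
    is_docker_mounting(host_path) {
        host_path.path == "/var/run/docker.sock"
    }

    is_docker_mounting(host_path) {
        host_path.path == "/var/run/docker"
    }

    socket_flagged { is_docker_mounting(socket_volume.hostPath) }   # true
    plain_flagged { is_docker_mounting(plain_volume.hostPath) }     # undefined, i.e. passes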
Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all PODs with latest tag that have ImagePullSecret not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all PODs with latest tag that have ImagePullSecret not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any word that does not contain digits as the latest. If no tag is specified, the image is treated as latests too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "image-pull-policy-is-not-set-to-always", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" - } - ] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined, this is a configurable control. 
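The tag-classification logic is the subtle part of the image-pull-policy rule: an image is treated as "latest-like" when the tag is literally :latest, when no tag is given, or when the tag is purely alphabetic (e.g. redis:alpine). The following simplified Rego sketch restates that idea with hypothetical package, container and helper names; it deliberately ignores registry ports and digests, which the fuller regular expressions in the rule above account for:

    package example_latest_tag

    # Hypothetical containers: an explicit :latest tag, an untagged image, an
    # alphabetic tag, and a properly pinned tag. All use a lax pull policy.
    containers := [
        {"name": "a", "image": "nginx:latest", "imagePullPolicy": "IfNotPresent"},
        {"name": "b", "image": "nginx", "imagePullPolicy": "IfNotPresent"},
        {"name": "c", "image": "redis:alpine", "imagePullPolicy": "IfNotPresent"},
        {"name": "d", "image": "nginx:1.25.3", "imagePullPolicy": "IfNotPresent"}
    ]

    # Simplified restatement of is_bad_container: ":latest", a missing tag, or a
    # purely alphabetic tag all count as "latest-like".
    latest_like(image) { endswith(image, ":latest") }

    latest_like(image) { not contains(image, ":") }

    latest_like(image) {
        parts := split(image, ":")
        i := count(parts) - 1
        regex.match("^[a-zA-Z]+$", parts[i])
    }

    lax_pull_policy(container) { container.imagePullPolicy == "Never" }

    lax_pull_policy(container) { container.imagePullPolicy == "IfNotPresent" }

    # bad == {"a", "b", "c"}; "d" is pinned to a version tag and passes.
    bad[container.name] {
        container := containers[_]
        latest_like(container.image)
        lax_pull_policy(container)
    }

Setting imagePullPolicy to Always, or pinning the tag, makes a container drop out of the bad set, which is exactly the remediation the control asks for.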
Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "label-usage-for-resources", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.recommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.recommendedLabels", - "name": "Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following labels." - } - ], - "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n \n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, 
podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n" - } - ] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "K8s common labels usage", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.k8sRecommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.k8sRecommendedLabels", - "name": "Kubernetes Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following kubernetes recommended labels." 
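The configurable part of this control is the data.postureControlInputs.recommendedLabels list, and a workload passes as soon as any one recommended key appears in metadata.labels. A small Rego sketch of that check, with a hypothetical package name, hypothetical sample label maps, and a hard-coded list standing in for the real configuration input:

    package example_recommended_labels

    # Hypothetical stand-in for data.postureControlInputs.recommendedLabels.
    recommended_labels := ["app", "tier", "phase", "version", "owner", "env"]

    labeled := {"app": "myapp", "team": "payments"}
    unlabeled := {"team": "payments"}

    # Mirrors is_desired_label: at least one recommended key must be present.
    is_desired_label(labels) {
        recommended_label := recommended_labels[_]
        labels[recommended_label]
    }

    has_expected_label { is_desired_label(labeled) }     # true ("app" is set)
    needs_fix_path { not is_desired_label(unlabeled) }   # true, so a fixPath would be emitted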
- } - ], - "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := 
recommended_labels[_]\n\tlabels[recommended_label]\n}\n" - } - ] - }, - { - "name": "Deprecated Kubernetes image registry", - "attributes": { - "armoBuiltin": true - }, - "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", - "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", - "controlID": "C-0253", - "baseScore": 5.0, - "example": "@controls/examples/c239.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-identify-old-k8s-registry", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Identifying if pod container images are from deprecated K8s registry", - "remediation": "Use images new registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated 
k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0004", - "C-0018", - "C-0044", - "C-0050", - "C-0056", - "C-0061", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0253" - ] -} \ No newline at end of file diff --git a/releaseDev/exceptions.json b/releaseDev/exceptions.json deleted file mode 100644 index f0ebc1a15..000000000 --- a/releaseDev/exceptions.json +++ /dev/null @@ -1,6854 +0,0 @@ -[ - { - "name": "exclude-pod-kube-apiserver", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-apiserver-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0013" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0013 " - }, - { - "controlID": "c-0020" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0034" - }, - { - "controlID": "c-0016" - }, - { - "controlID": "c-0004" - }, - { - "controlID": "c-0050" - }, - { - "controlID": "c-0009" - }, - { - "controlID": "c-0048" - }, - { - "controlID": "c-0041" - } - ] - }, - { - "name": "exclude-eks-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "aws-node-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "coredns-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-proxy-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - 
"designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "aws-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "kube-proxy" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "eventrouter" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "ebs-csi-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "ebs-csi-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "ebs-csi-node-windows" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "coredns-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - 
"designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-17", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "aws-cloud-provider" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "aws-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "eks-admin" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "eks-vpc-resource-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-21", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-22", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "tagging-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-23", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - 
"systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "vpc-resource-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-24", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "eventrouter" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-25", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ebs-csi-controller-sa" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-26", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ebs-csi-node-sa" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-27", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:fargate-manager" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-28", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:addon-manager" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-29", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:certificate-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "eks:node-manager" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-eks-resources-31", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Group", - "name": "system:masters" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-kubescape-prometheus-security-context", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - 
"attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-kubescape-prometheus-deployment-allowed-registry", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0001" - }, - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-prometheus-deployment-ingress-and-egress", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - } - ] - }, - { - "name": "exclude-default-namespace-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "name": "kubescape", - "namespace": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-default-namespace-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-default-namespace-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "default", - "namespace": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-otel", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "otel-collector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - } - ] - }, - { - "name": "exclude-service-accounts-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - 
"controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "storage-aggregated-apiserver-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0186" - }, - { - "controlID": "c-0053" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-service-accounts-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "storage", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0190" - } - ] - }, - { - "name": "exclude-service-accounts-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "node-agent", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0190" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": 
"c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "storage", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "otel-collector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-security-context-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "node-agent", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0055" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - }, - { - "controlID": "c-0058" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0001" - }, - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - 
"namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0001" - }, - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0001" - }, - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0001" - }, - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-allowed-registry-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0001" - }, - { - "controlID": "c-0078" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - } - ] - }, - { - "name": "exclude-kubescape-deployment-ingress-and-egress-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - } - ] - }, - { 
- "name": "exclude-kubescape-deployment-ingress-and-egress", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "node-agent", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0045" - }, - { - "controlID": "c-0046" - }, - { - "controlID": "c-0048" - }, - { - "controlID": "c-0057" - }, - { - "controlID": "c-0013" - }, - { - "controlID": "c-0016" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0034" - }, - { - "controlID": "c-0074" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - } - ] - }, - { - "name": "exclude-ks-service-account", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "ks-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-kubescape-service-account", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kubescape-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0007" - }, - { - "controlID": "c-0015" - } - ] - }, - { - "name": "exclude-kubescape-default-service-account", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "default", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0189" - }, - { - "controlID": "c-0190" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "ks-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0053" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kubescape-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0053" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "storage-sa", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0053" - } - ] - }, - { - "name": "exclude-kubescape-service-accounts-4", - "policyType": 
"postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "node-agent-service-account", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0034" - }, - { - "controlID": "c-0053" - } - ] - }, - { - "name": "exclude-kubescape-otel", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "otel-collector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - } - ] - }, - { - "name": "exclude-kubescape-host-scanner-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "host-scanner", - "namespace": "kubescape-host-scanner" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kubescape-host-scanner-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "host-scanner", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-schedulers-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubevuln-schedule-.*", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": "c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-schedulers-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubescape-registry-scan-.*", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": "c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-schedulers-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubevuln-scheduler", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": 
"c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-schedulers-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "name": "kubescape-scheduler", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0026" - }, - { - "controlID": "c-0076" - }, - { - "controlID": "c-0077" - }, - { - "controlID": "c-0210" - }, - { - "controlID": "c-0211" - } - ] - }, - { - "name": "exclude-storage-apiserver", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "storage-apiserver", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0030" - }, - { - "controlID": "c-0034" - }, - { - "controlID": "c-0055" - }, - { - "controlID": "c-0056" - }, - { - "controlID": "c-0017" - }, - { - "controlID": "c-0018" - }, - { - "controlID": "c-0076" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "etcd-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-proxy-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-system" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "storage-provisioner" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-scheduler-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-system-resources-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - 
}, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-controller-manager-.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-minikube-kube-public-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-public" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-minikube-kube-public-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-public", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-minikube-kube-node-lease-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-node-lease" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-minikube-kube-node-lease-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-node-lease", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "default" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "certificate-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "bootstrap-signer" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "clusterrole-aggregation-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - 
"attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "root-ca-cert-publisher" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pvc-protection-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "statefulset-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ttl-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "service-account-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "horizontal-pod-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "expand-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "replicaset-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": 
"Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "replication-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "resourcequota-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-17", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpoint-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpointslice-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpointslicemirroring-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ephemeral-volume-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-21", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "node-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-22", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pv-protection-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-23", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "job-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-24", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - 
"resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "daemon-set-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-25", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "deployment-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-26", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "generic-garbage-collector" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-27", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "persistent-volume-binder" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-28", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "storage-provisioner" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-29", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "token-cleaner" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-proxy" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-31", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "namespace-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-32", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cronjob-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-33", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - 
"resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "attachdetach-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-34", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "service-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-35", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "disruption-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-36", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pod-garbage-collector" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-37", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ttl-after-finished-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:kube-scheduler", - "kind": "User" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:kube-controller-manager", - "kind": "User" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:masters", - "kind": "Group" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - 
}, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azuredisk-node-win" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "azure-ip-masq-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "cloud-node-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "cloud-node-manager-windows" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-deployments-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "omsagent-rs" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-pods-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "azure-ip-masq-agent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": 
"Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "cloud-node-manager-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "coredns-autoscaler--[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "csi-azuredisk-node-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "csi-azurefile-node-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "konnectivity-agent-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "omsagent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-pods-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "omsagent-rs-[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-aks-kube-system-services-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-services-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-4", - "policyType": "postureExceptionPolicy", - "actions": 
[ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azuredisk-node" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azurefile-node" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azurefile-node-win" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "kube-proxy" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "omsagent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-daemonsets-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "omsagent-win" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "coredns-autoscaler-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "coredns-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "konnectivity-agent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true 
- }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-replicasets-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "omsagent-rs-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-namespaces-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-system" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "azure-cloud-provider" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cloud-node-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "csi-azuredisk-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "csi-azurefile-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-25", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-27", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - 
"namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "omsagent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-46", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "default", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-47", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-node-lease", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-48", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-public", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-49", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "azure-ip-masq-agent-config-reconciled" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-50", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "cluster-autoscaler-status" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-51", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "container-azm-ms-aks-k8scluster" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-52", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-53", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": 
"exclude-aks-kube-system-sa-54", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns-custom" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-55", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "extension-apiserver-authentication" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-56", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-57", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "omsagent-rs-config" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-58", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "overlay-upgrade-data" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-59", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "aks-webhook-admission-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-60", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "aks-node-mutating-webhook" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-61", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "aks-node-validating-webhook" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-62", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Group", - "name": "system:masters" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-63", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - 
"resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Group", - "name": "system:nodes" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-64", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "clusterAdmin" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-65", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kube-controller-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-aks-kube-system-sa-66", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kube-scheduler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-gke-kube-system-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-proxy-[A-Za-z0-9-]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "metadata-proxy-v[0-9.]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "node-local-dns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metrics-agent.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "pdcsi-node-windows" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": 
"kube-system", - "name": "anetd" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "netd" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke-big" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke-small" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke-max" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke.*" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nccl-fastsocket-installer" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "filestore-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "pdcsi-node" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-17", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - 
"resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "ip-masq-agent" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-18", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "anetd-win" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-19", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metadata-server" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-20", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metrics-agent-windows" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-22", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-23", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin-large" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-24", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin-medium" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-25", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "image-package-extractor" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-26", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "CronJob", - "namespace": "kube-system", - "name": "image-package-extractor-cleanup" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] 
- }, - { - "name": "exclude-gke-kube-system-resources-27", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin-small" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-29", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-30", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "egress-nat-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-31", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "event-exporter-gke" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-32", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "antrea-controller" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-33", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "antrea-controller-horizontal-autoscaler" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-34", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "kube-dns-autoscaler" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-35", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "metrics-server-v[0-9.]+" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-36", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": 
"Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent-autoscaler" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-37", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentd-elasticsearch" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-38", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-gke-kube-system-resources-39", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "l7-default-backend" - } - } - ], - "posturePolicies": [ - { - "controlID": "C-.*" - } - ] - }, - { - "name": "exclude-kube-system-service-accounts-38", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "konnectivity-agent-cpha" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-49", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cloud-provider" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-71", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-78", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-dns-autoscaler" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-79", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "netd" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-80", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - 
"systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "metadata-proxy" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-81", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-82", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cilium" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-83", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "node-local-dns" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-84", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "gke-metrics-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-85", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "egress-nat-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-86", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-87", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "event-exporter-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-88", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-cpha" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-89", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - 
"resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "fluentbit-gke" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-90", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pdcsi-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-91", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ip-masq-agent" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-92", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "filestorecsi-node-sa" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-service-accounts-93", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "gke-metadata-server" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-users-and-groups-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "namespace": "kube-system", - "name": "system:vpa-recommender" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-kube-system-users-and-groups-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "namespace": "kube-system", - "name": "system:anet-operator" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:clustermetrics" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:controller:glbc" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:l7-lb-controller" - 
} - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:managed-certificate-controller" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:gke-common-webhooks" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:gcp-controller-manager" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:resource-tracker" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:storageversionmigrator" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-users-and-groups-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kubestore-collector" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-1", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "ca-validate-cfg" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-2", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "flowcontrol-guardrails.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-3", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "validation-webhook.snapshot.storage.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-4", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ 
- { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "nodelimit.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-5", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "gkepolicy.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-6", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "validation-webhook.snapshot.storage.k8s.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "APIService", - "name": "v1beta1.metrics.k8s.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "pod-ready.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "ca-mutate-cfg" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-10", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "neg-annotation.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-11", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "mutate-scheduler-profile.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-12", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "sasecret-redacter.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-13", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": 
"Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "workload-defaulter.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-14", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "admissionwebhookcontroller.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-15", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "gke-vpa-webhook-config" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-system-resources-16", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "filestorecsi-mutation-webhook.storage.k8s.io" - } - } - ], - "posturePolicies": [ - {} - ] - }, - { - "name": "exclude-service-accounts-7", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kube-controller-manager", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0053" - } - ] - }, - { - "name": "exclude-service-accounts-8", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "kube-scheduler", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0053" - } - ] - }, - { - "name": "exclude-service-accounts-9", - "policyType": "postureExceptionPolicy", - "actions": [ - "alertOnly" - ], - "attributes": { - "systemException": true - }, - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "name": "route-controller", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "controlID": "c-0053" - } - ] - } -] \ No newline at end of file diff --git a/releaseDev/frameworks.json b/releaseDev/frameworks.json deleted file mode 100644 index eb7a81b00..000000000 --- a/releaseDev/frameworks.json +++ /dev/null @@ -1,11419 +0,0 @@ -[ - { - "name": "AllControls", - "description": "Contains all the controls from all the frameworks", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Forbidden Container Registries", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster 
takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster\u2019s management layer.", - "remediation": "Limit the registries from which you pull container images from", - "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", - "test": "Checking image from pod spec, if the registry of the image is from the list of blocked registries we raise an alert.", - "controlID": "C-0001", - "baseScore": 7.0, - "example": "@controls/examples/c001.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Resources memory limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0004", - "example": "@controls/examples/c004.yaml", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Data Destruction", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. 
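For the memory limit control (C-0004) above, a minimal sketch of a conforming Pod; the name, image, and values are placeholders rather than anything mandated by the control:

apiVersion: v1
kind: Pod
metadata:
  name: memory-limited            # placeholder name
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause       # placeholder image
    resources:
      requests:
        memory: "64Mi"            # request reserves memory for scheduling
      limits:
        memory: "128Mi"           # the limit is what C-0004 looks for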
This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. 
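The resource limits control (C-0009) above also mentions namespace LimitRange objects; a sketch of one that supplies default CPU and memory limits for containers that omit them (namespace and values are illustrative):

apiVersion: v1
kind: LimitRange
metadata:
  name: default-limits            # placeholder name
  namespace: my-namespace         # placeholder namespace
spec:
  limits:
  - type: Container
    default:                      # applied as limits when a container sets none
      cpu: "500m"
      memory: "256Mi"
    defaultRequest:               # applied as requests when a container sets none
      cpu: "100m"
      memory: "128Mi"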
Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. . Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. 
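For the non-root containers control (C-0013) above, a sketch of the securityContext settings its remediation describes; the user and group IDs and the names are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: nonroot-example           # placeholder name
spec:
  securityContext:                # pod-level defaults for all containers
    runAsNonRoot: true
    runAsUser: 1000               # 1000 or higher, as the remediation suggests
    runAsGroup: 3000
  containers:
  - name: app
    image: k8s.gcr.io/pause       # placeholder image
    securityContext:
      allowPrivilegeEscalation: false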
Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. 
An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Mount service principal", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. 
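A sketch combining the remediations of C-0016, C-0017 and C-0018 above: privilege escalation disabled, a read-only root filesystem with an explicit writable scratch mount, and a readiness probe; the probe path and port, the name and the image are hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: hardened-example          # placeholder name
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause       # placeholder image
    securityContext:
      allowPrivilegeEscalation: false   # C-0016
      readOnlyRootFilesystem: true      # C-0017
    readinessProbe:                     # C-0018
      httpGet:
        path: /healthz                  # hypothetical endpoint
        port: 8080                      # hypothetical port
      initialDelaySeconds: 5
      periodSeconds: 10
    volumeMounts:
    - name: tmp                         # writable scratch space mounted explicitly
      mountPath: /tmp
  volumes:
  - name: tmp
    emptyDir: {}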
Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
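For the ingress/egress control (C-0030) above, the usual starting point is a default-deny NetworkPolicy per namespace, after which additional policies allow the specific flows each workload needs; the namespace is a placeholder and enforcement assumes a CNI that supports NetworkPolicy:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all          # placeholder name
  namespace: my-namespace         # placeholder namespace
spec:
  podSelector: {}                 # selects every pod in the namespace
  policyTypes:
  - Ingress
  - Egress
  # no ingress or egress rules are listed, so all traffic is denied by default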
", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. 
This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Malicious admission controller (validating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Malicious admission controller (mutating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. 
This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "SSH server running inside container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "long_description": "SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if service connected to some workload has an SSH port (22/2222). If so we raise an alert. ", - "controlID": "C-0042", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. 
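For the host PID/IPC (C-0038) and hostNetwork (C-0041) controls above, the remediation is simply to leave the host namespaces disabled; a sketch with the fields shown explicitly (false is also the default when the fields are omitted):

apiVersion: v1
kind: Pod
metadata:
  name: isolated-example          # placeholder name
spec:
  hostNetwork: false
  hostPID: false
  hostIPC: false
  containers:
  - name: app
    image: k8s.gcr.io/pause       # placeholder image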
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). 
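For the writable hostPath (C-0045) and insecure capabilities (C-0046) controls above, a sketch that mounts a host path read-only and drops all capabilities; the names and the host path are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: readonly-hostpath         # placeholder name
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause       # placeholder image
    securityContext:
      capabilities:
        drop:
        - ALL                     # drop everything, add back only what is required
    volumeMounts:
    - name: host-config
      mountPath: /host-config
      readOnly: true              # the setting C-0045 checks on hostPath mounts
  volumes:
  - name: host-config
    hostPath:
      path: /etc/some-config      # illustrative host path
      type: Directory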
", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Resources CPU limit and request", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0050", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Instance Metadata API", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Access container service account", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
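A common way to satisfy the service-account check described above (C-0053) is to avoid mounting the SA token where it is not needed; a minimal sketch, with a hypothetical pod name:
apiVersion: v1
kind: Pod
metadata:
  name: no-sa-token                       # hypothetical
spec:
  automountServiceAccountToken: false     # the SA token is not mounted into the containers
  containers:
  - name: app
    image: k8s.gcr.io/test-webserver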
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' ability to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommended, when possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", - "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured liveness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "long_description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "controlID": "C-0056", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp etc., and still remove all unnecessary capabilities. 
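To illustrate the Linux-hardening (C-0055) and liveness-probe (C-0056) checks, a Pod could define a seccomp profile and a liveness probe as in the sketch below; the name, port and probe path are hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: hardened-probe-example   # hypothetical
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault       # one of the hardening mechanisms C-0055 looks for
  containers:
  - name: app
    image: k8s.gcr.io/test-webserver
    livenessProbe:               # presence of this probe satisfies C-0056
      httpGet:
        path: /healthz           # illustrative endpoint
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10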
Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. 
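For the ingress-nginx snippet-annotation issue (C-0059), the remediation above can be expressed as a ConfigMap entry; the ConfigMap name and namespace depend on how ingress-nginx was deployed, so the ones below are only an assumption.
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller       # assumed name, typical for a default deployment
  namespace: ingress-nginx             # assumed namespace
data:
  allow-snippet-annotations: "false"   # disables the vulnerable custom-snippets feature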
This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. This control identifies all the PODs running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. 
Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "No impersonation", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
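The impersonation check (C-0065) flags RBAC objects like the hypothetical ClusterRole sketched below; removing the impersonate verb, or unbinding such a role from day-to-day subjects, is what the remediation asks for.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: impersonator              # hypothetical name
rules:
- apiGroups: [""]
  resources: ["users", "groups", "serviceaccounts"]
  verbs: ["impersonate"]          # this verb is what C-0065 alerts on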
It is important to use it so the operator has a record of the events that happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of the events that happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSP enables fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and extend authorization beyond RBAC. It is important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They expose a service on port 10250 where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. 
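The two kubelet checks above (C-0069 and C-0070) can also be addressed through the kubelet configuration file rather than command-line flags; a minimal sketch, assuming the cluster's client CA lives at the path shown.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                              # C-0069: reject anonymous requests
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt    # C-0070: verify client certificates (assumed path)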
This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They expose a service on port 10250 where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "remediation": "Create the necessary Deployment object for every POD, making every POD a first-class citizen in your IaC architecture.", - "long_description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "test": "Test if PODs are not associated with Deployment, ReplicaSet etc. If not, fail.", - "controlID": "C-0073", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Containers mounting Docker socket", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Mounting the Docker socket (Unix socket) enables the container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", - "remediation": "Remove the docker socket mount request or define an exception.", - "long_description": "Mounting the Docker socket (Unix socket) enables the container to access Docker internals, retrieve sensitive information and execute Docker commands, if the Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", - "test": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker, the container has access to Docker internals - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. 
If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any tag that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", - "test": "If imagePullPolicy = always pass, else fail.", - "controlID": "C-0075", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor Kubernetes clusters using different tools such as kubectl, dashboard and others in an interoperable way. 
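Tying the last three checks together (C-0075, C-0076 and C-0077), a Deployment might declare both the recommended labels and an explicit imagePullPolicy; all names and label values below are hypothetical.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend                       # hypothetical
  labels:
    app: myapp                         # labels of the kind C-0076 looks for
    tier: frontend
    app.kubernetes.io/name: myapp      # a Kubernetes common label (C-0077)
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: web
        image: nginx:latest
        imagePullPolicy: Always        # required by C-0075 when the latest tag is used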
Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 or below 5.16.2 it fires an alert", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system. 
This control identifies all the resources that don't deploy either AppArmor or SELinux, run as root or allow privilege escalation, or have corresponding dangerous capabilities.", - "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. In cases where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", - "controlID": "C-0086", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation", - "Impact - Data access in container" - ] - } - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling an attacker to gain access to read-only copies of arbitrary files from the host using a specially-crafted POD configuration yaml", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. 
Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "RBAC enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access", - "Privilege escalation" - ] - } - ] - }, - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-39328-grafana-auth-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable an attacker to access unauthorized endpoints under heavy load.", - "remediation": "Update your Grafana to 9.2.4 or above", - "long_description": "An internal security audit identified a race condition in the Grafana codebase, which allowed an unauthenticated user to query an arbitrary endpoint in Grafana. A race condition in the HTTP context creation could result in an HTTP request being assigned the authentication/authorization middlewares of another call. Under heavy load, it is possible that a call protected by a privileged middleware receives the middleware of a public query instead. As a result, an unauthenticated user can successfully query protected endpoints. The CVSS score for this vulnerability is 9.8 Critical.", - "test": "This control tests for vulnerable versions of Grafana (between 9.2 and 9.2.3)", - "controlID": "C-0090", - "baseScore": 9.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Kyverno to 1.8.5 or above", - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. The image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. 
The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control test for vulnerable versions of Grafana (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0001", - "C-0002", - "C-0004", - "C-0005", - "C-0007", - "C-0009", - "C-0012", - "C-0013", - "C-0014", - "C-0015", - "C-0016", - "C-0017", - "C-0018", - "C-0020", - "C-0021", - "C-0026", - "C-0030", - "C-0031", - "C-0034", - "C-0035", - "C-0036", - "C-0038", - "C-0039", - "C-0041", - "C-0042", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0049", - "C-0050", - "C-0052", - "C-0053", - "C-0054", - "C-0055", - "C-0056", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0078", - "C-0079", - "C-0081", - "C-0086", - "C-0087", - "C-0088", - "C-0090", - "C-0091", - "C-0262" - ] - }, - { - "name": "MITRE", - "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Data Destruction", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
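As a sketch of the remediation for credentials in configuration files (C-0012), a container can take its credential from a Secret instead of a plaintext environment value; the pod, Secret and key names are hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: app-with-secret          # hypothetical
spec:
  containers:
  - name: app
    image: k8s.gcr.io/test-webserver
    env:
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:            # the credential comes from a Secret, not a literal value
          name: db-credentials   # hypothetical Secret name
          key: password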
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Mount service principal", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Malicious admission controller (validating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CoreDNS poisoning", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral Movement" - ], - "controlTypeTags": [ - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", - "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", - "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service that is being used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located at the kube-system namespace. If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", - "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", - "controlID": "C-0037", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Malicious admission controller (mutating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "SSH server running inside container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit access to the SSH server using network policies.", - "long_description": "An SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if a service connected to a workload has an SSH port (22/2222). If so, we raise an alert.", - "controlID": "C-0042", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting a host directory into the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "A hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in the POD spec if there is a hostPath volume; if it has mount.readOnly == false (or the field doesn\u2019t exist), we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting a host directory into the container can be used by attackers to get access to the underlying host. 
This control identifies all the PODs using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use the exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Instance Metadata API", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } - ] - }, - "description": "Attackers who gain access to a container may query the metadata API service to get information about the underlying node. This control checks if there is access from the nodes to cloud providers' instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide an instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container may query the metadata API service to get information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "Access container service account", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with the KubeAPI server. All PODs with an SA token mounted (if such a token has a Role or a ClusterRole binding) are considered potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have an SA token mounted into them.", - "long_description": "A service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. 
If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s, and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes: it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of the events that happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes: it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of the events that happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s, and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSPs enable fine-grained authorization of pod creation, and it is important to enable them", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and extend authorization beyond RBAC. 
It is important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s, and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for the anonymous-auth configuration. If this configuration is set in both places, the command line value takes precedence.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They expose service port 10250, on which they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification: the Kubelet must be configured with a client CA file for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. 
This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0007", - "C-0012", - "C-0014", - "C-0015", - "C-0020", - "C-0021", - "C-0026", - "C-0031", - "C-0035", - "C-0036", - "C-0037", - "C-0039", - "C-0042", - "C-0045", - "C-0048", - "C-0052", - "C-0053", - "C-0054", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070" - ] - }, - { - "name": "cis-aks-t1.2.0", - "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", - "attributes": { - "version": "v1.2.0", - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Master (Control Plane) Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0254" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0175", - "C-0179", - "C-0182", - "C-0173", - "C-0174", - "C-0176", - "C-0177", - "C-0178", - "C-0180", - "C-0183" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "4.2", - "controlsIDs": [ - "C-0201", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219" - ] - }, - "3": { - "name": "Azure Policy / OPA", - "id": "4.3", - "controlsIDs": [] - }, - "4": { - "name": "CNI Plugin", - "id": "4.4", - "controlsIDs": [ - "C-0206", - "C-0205" - ] - }, - "5": { - "name": "Secrets Management", - "id": "4.5", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "6": { - "name": "Extensible Admission Control", - "id": "4.6", - "controlsIDs": [] - }, - "7": { - "name": "General Policies", - "id": "4.7", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0078", - "C-0243", - "C-0250", - "C-0251" - ] - }, - "2": { - "name": "Access and identity options for Azure Kubernetes Service (AKS)", - "id": "5.2", - "controlsIDs": [ - "C-0239", - "C-0241" - ] - }, - "3": { - "name": "Key Management Service (KMS)", - "id": "5.3", - "controlsIDs": [ - "C-0244" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0240", - "C-0245", - "C-0247", - "C-0248", - "C-0252" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0088" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0242", - "C-0249" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.1.4 Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - 
"armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Use approved container registries.", - "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [], - "references": [ - "\n\n \n\n " - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry." - }, - { - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access", - "Privilege escalation" - ] - } - ] - }, - "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "long_description": "Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.", - "test": "Testing API server or managed Kubernetes vendor API to determine if RBAC is enabled", - "controlID": "C-0088", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [], - "references": [ - "\n\n " - ] - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. Verify that the ownership is set to `root:root`.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. 
You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": { \"anonymous\": { \"enabled\": false }` argument is set to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"anonymous\":{\"enabled\":false}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... \"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"authentication\": \"webhook\": \"enabled\"` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n 
\n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"authentication\": {\"webhook\": { \"enabled\": is set to true`.\n\n If the `\"authentication\": {\"mode\": {` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `\"authentication\": {\"mode\": {` to something other than `AlwaysAllow`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... \"webhook\":{\"enabled\":true}` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. 
Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `\"x509\": {\"clientCAFile:\"` set to the location of the client certificate authority file.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\nsudo more /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `\"x509\": {\"clientCAFile:\"` argument exists and is set to the location of the client certificate authority file.\n\n If the `\"x509\": {\"clientCAFile:\"` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `\"authentication\": { \"x509\": {\"clientCAFile:\"` to the location of the client certificate authority file.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication.. 
x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\nreadOnlyPort to 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": \n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n----protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. 
It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `makeIPTablesUtilChains` set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that if the `makeIPTablesUtilChains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"makeIPTablesUtilChains\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. 
Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0182", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", - "references": [ - "\n\n \n\n \n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n If using a Kubelet configuration file, check that there is an entry for `RotateKubeletServerCertificate` is set to `true`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - 
"description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does 
not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0201", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "CIS-4.4.1 Ensure latest CNI version is used", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Ensure CNI plugin supports network policies.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. 
Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "\n\n \n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "default_value": "By default, network policies are not created.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.5.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Azure AKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "\n\n \n\n \n\n ." - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "When you create an AKS cluster, the following namespaces are available:\n\n NAMESPACES\nNamespace Description\ndefault Where pods and deployments are created by default when none is provided. In smaller environments, you can deploy applications directly into the default namespace without creating additional logical separations. When you interact with the Kubernetes API, such as with kubectl get pods, the default namespace is used when none is specified.\nkube-system Where core resources exist, such as network features like DNS and proxy, or the Kubernetes dashboard. You typically don't deploy your own applications into this namespace.\nkube-public Typically not used, but can be used for resources to be visible across the whole cluster, and can be viewed by any user.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc.) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. 
A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.7.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get all -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n as an alternative, AZ CLI can be used:\n\n \n```\naz aks list --output yaml\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an AKS cluster, the value of \"enablePodSecurityPolicy\" is null.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities, this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostPID is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostIPC is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully 
check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostNetwork is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether allowPrivilegeEscalation is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features still have a escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. 
If this file is specified, you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. Verify that the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on each worker\nnode. 
For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the Azure AKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0239", - "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Azure AKS against Azure APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0240", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", - "long_description": "All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them.\n\n Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors.\n\n These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n If Network Policy is used, a cluster must have at least 2 nodes of type `n1-standard-1` or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 `n1-standard-1` instances.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. 
Specifically, it increases the memory footprint of the `kube-system` process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0241", - "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", - "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", - "long_description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms. When enabled, Azure AD principals will be validated exclusively by Azure RBAC while regular Kubernetes users and service accounts are exclusively validated by Kubernetes RBAC. Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.With Azure RBAC, you create a role definition that outlines the permissions to be applied. You then assign a user or group this role definition via a role assignment for a particular scope. The scope can be an individual resource, a resource group, or across the subscription.", - "remediation": "Set Azure RBAC as access system.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0243", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. 
This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Azure Defender and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.", - "default_value": "Images are not scanned by Default.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0244", - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", - "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0245", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0247", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. 
You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Azure virtual machines and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.\n\n Limitations\nIP authorized ranges can't be applied to the private api server endpoint, they only apply to the public API server\nAvailability Zones are currently supported for certain regions.\nAzure Private Link service limitations apply to private clusters.\nNo support for Azure DevOps Microsoft-hosted Agents with private clusters. Consider to use Self-hosted Agents.\nFor customers that need to enable Azure Container Registry to work with private AKS, the Container Registry virtual network must be peered with the agent cluster virtual network.", - "default_value": "By default, Endpoint Private Access is disabled.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0248", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. 
If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0249", - "name": "CIS-5.6.1 Restrict untrusted workloads", - "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster and presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI with no delay as the cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", - "long_description": "It is best practice to restrict or fence untrusted workloads when running in a multi-tenant environment. Azure Container Instances is a great solution for any scenario that can operate in isolated containers, including simple applications, task automation, and build jobs.", - "remediation": "", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review" - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "ACI is not a default component of AKS", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0250", - "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", - "long_description": "The Cluster Service Account does not require administrative access to Azure ACR, only requiring pull access to containers to deploy onto Azure AKS. 
Restricting permissions follows the principle of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0251", - "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", - "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Azure Container Registry (ACR) may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Azure ACR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0252", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's virtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network.\n\n Although the Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes API publicly with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "manual_test": "", - "references": [ - "\n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0254", - "name": "CIS-2.1.1 Enable audit Logs", - "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", - "long_description": "Exporting logs and metrics to a dedicated, persistent datastore ensures availability of audit data following a cluster security event, and provides a central location for analysis of log and metric data collated from multiple sources.", - "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", - "manual_test": "", - "references": [ - "\n\n \n\n " - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "What is collected from Kubernetes clusters\nContainer insights includes a predefined set of metrics and inventory items collected that are written as log data in your Log Analytics workspace. 
All metrics listed below are collected by default every one minute.\n\n Node metrics collected\nThe following list is the 24 metrics per node that are collected:\n\n cpuUsageNanoCores\ncpuCapacityNanoCores\ncpuAllocatableNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryCapacityBytes\nmemoryAllocatableBytes\nrestartTimeEpoch\nused (disk)\nfree (disk)\nused\\_percent (disk)\nio\\_time (diskio)\nwrites (diskio)\nreads (diskio)\nwrite\\_bytes (diskio)\nwrite\\_time (diskio)\niops\\_in\\_progress (diskio)\nread\\_bytes (diskio)\nread\\_time (diskio)\nerr\\_in (net)\nerr\\_out (net)\nbytes\\_recv (net)\nbytes\\_sent (net)\nKubelet\\_docker\\_operations (kubelet)\nContainer metrics\nThe following list is the eight metrics per container collected:\n\n cpuUsageNanoCores\ncpuRequestNanoCores\ncpuLimitNanoCores\nmemoryRssBytes\nmemoryWorkingSetBytes\nmemoryRequestBytes\nmemoryLimitBytes\nrestartTimeEpoch\nCluster inventory\nThe following list is the cluster inventory data collected by default:\n\n KubePodInventory \u2013 1 per minute per container\nKubeNodeInventory \u2013 1 per node per minute\nKubeServices \u2013 1 per service per minute\nContainerInventory \u2013 1 per container per minute", - "default_value": "By default, cluster control plane logs aren't sent to be Logged.", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0078", - "C-0088", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0182", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0201", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - "C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0235", - "C-0238", - "C-0239", - "C-0240", - "C-0241", - "C-0242", - "C-0243", - "C-0244", - "C-0245", - "C-0247", - "C-0248", - "C-0249", - "C-0250", - "C-0251", - "C-0252", - "C-0254" - ] - }, - { - "name": "NSA", - "description": "Implement NSA security advices for K8s ", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using a list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or that can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. 
Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. 
Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object. Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "A potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply the least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high-privilege roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. 
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers may be given more privileges than they actually need. 
This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' ability to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommended, where possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 1.22, SELinux is enabled by default. ", - "test": "Check if AppArmor, Seccomp, SELinux or Capabilities are defined in the securityContext of the container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp, etc., and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events that happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSPs enable fine-grained authorization of pod creation, and it is important to enable them", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and extend authorization beyond RBAC. It is important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node-level orchestrator in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the operator must configure the Kubelet with a client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node-level orchestrator in the Kubernetes control plane. 
They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the operator must configure the Kubelet with a client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0005", - "C-0009", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070" - ] - }, - { - "name": "ArmoBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Forbidden Container Registries", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), a compromised cloud credential can lead to cluster takeover. Attackers may abuse cloud account credentials or the IAM mechanism to gain access to the cluster\u2019s management layer.", - "remediation": "Limit the registries from which you pull container images", - "long_description": "Running a compromised image in a cluster can compromise the cluster. Attackers who get access to a private registry can plant their own compromised images in the registry. The latter can then be pulled by a user. In addition, users often use untrusted images from public registries (such as Docker Hub) that may be malicious. Building images based on untrusted base images can also lead to similar results.", - "test": "Checking the image from the pod spec; if the registry of the image is in the list of blocked registries, we raise an alert.", - "controlID": "C-0001", - "baseScore": 7.0, - "example": "@controls/examples/c001.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using a list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or that can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on the allowPrivilegeEscalation bit and make sure runAsNonRoot is true.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. 
Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. ", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. 
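A minimal sketch of the default deny-all policy recommended for the ingress and egress control (C-0030) above; the policy name and namespace are placeholders, and additional policies would then allow only the required traffic:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all       # placeholder name
  namespace: my-namespace      # placeholder namespace
spec:
  podSelector: {}              # selects every Pod in the namespace
  policyTypes:
  - Ingress
  - Egress                     # no rules listed, so all ingress and egress is denied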
Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). ", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. 
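For the automatic service account mapping control (C-0034) above, a hedged sketch of disabling token automount at both the ServiceAccount and the Pod level; all names and the image are placeholders:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                        # placeholder name
automountServiceAccountToken: false   # disable automount for the service account
---
apiVersion: v1
kind: Pod
metadata:
  name: app                           # placeholder name
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false # pod-level setting takes precedence
  containers:
  - name: app
    image: nginx                      # placeholder image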
", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. 
And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "long_description": "Attackers may try to map the cluster network to get information on the running applications, including scanning for known vulnerabilities. By default, there is no restriction on pods communication in Kubernetes. Therefore, attackers who gain access to a single container, may use it to probe the network.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0049", - "baseScore": 3.0, - "example": "@controls/examples/c049.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. 
This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. 
Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "long_description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
This control identifies all the PODs running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Sudo in container entrypoint", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "long_description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "test": "Check that there is no 'sudo' in the container entrypoint", - "controlID": "C-0062", - "baseScore": 5.0, - "example": "@controls/examples/c062.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Portforwarding privileges", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "Attackers with relevant RBAC permission can use \u201ckubectl portforward\u201d command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl portforward\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have relevant RBAC permissions, can run open a backdoor communication channel directly to the sockets inside target container using exec command \u201ckubectl portforward\u201d command. Using this method, attackers can bypass network security restrictions and communicate directly with software in the containers.", - "test": "Check which subjects have RBAC permissions to portforward into pods\u2013 if they have the \u201cpods/portforward\u201d resource.", - "controlID": "C-0063", - "baseScore": 5.0, - "example": "@controls/examples/c063.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "No impersonation", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. 
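For the no-impersonation control (C-0065) above, a hedged sketch of the kind of RBAC rule the control flags, i.e. a role granting the impersonate verb; the role name is a placeholder:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: impersonator           # placeholder name
rules:
- apiGroups: [""]
  resources: ["users", "groups", "serviceaccounts"]
  verbs: ["impersonate"]       # the verb this control alerts on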
However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "long_description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "test": "Check for RBACs giving 'impersonate' verb to users/groups/uids/serviceaccounts", - "controlID": "C-0065", - "baseScore": 6.0, - "example": "@controls/examples/c065.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. It is an important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. 
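For the kubelet controls above (C-0069 anonymous access and C-0070 client TLS authentication), a minimal KubeletConfiguration sketch equivalent to the --anonymous-auth=false and --client-ca-file flags mentioned in the remediations; the CA file path is a placeholder:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                           # same effect as --anonymous-auth=false
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt # same effect as --client-ca-file; placeholder path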
They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "Images from allowed registry", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "If attackers get access to the cluster, they can re-point kubernetes to a compromized container repository. This control is intended to ensure that all the container images are taken from the authorized repositories only. User should list all the approved repositories in the parameters of this control so that any potential dangerous image can be identified.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "long_description": "Linux maintainers disclosed a broadly available Linux kernel vulnerability (CVE-2022-0185) which enables attackers to escape containers and get full control over the node. In order to be able to exploit this vulnerability, the attacker needs to be able to run code on in the container and the container must have CAP_SYS_ADMIN privileges. Linux kernel and all major distro maintainers have released patches. 
This control alerts on vulnerable kernel versions of Kubernetes nodes.", - "test": "Checking Linux kernel version of the Node objects, if it is above 5.1 and below 5.16.2 it fires an alert", - "controlID": "C-0079", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "long_description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD. Exploiting it enables attackers to obtain sensitive information like credentials, secrets, API keys from other applications on the platform. This in turn can lead to privilege escalation, lateral movements and information disclosure.", - "test": "Checking the Argo CD deployment version and firing an alert if it is below the fixed versions (v2.1.9, v2.2.4 or v2.3.0)", - "controlID": "C-0081", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that deploy neither AppArmor nor SELinux, and that run as root, allow privilege escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "long_description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. In order to exploit this vulnerability, malicious code should run as root in the container or have CAP_DAC_OVERRIDE capability. If SELinux or AppArmor is deployed, this CVE becomes not exploitable. Also, the exploit is possible when container runtime uses cgroup version 1 implementation (which we assume is on by default, since it is not visible from the Kubernetes level). When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. Note, it is enough to have a single node in the cluster with vulnerable Kernel in order to damage the system.
This control identifies all the resources that don't deploy niether AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "test": "This control checks whether the container is running with high privileges (root or CAP_DAC_OVERRIDE capability) and doesn't have SELinux or AppArmor enabled. In case where the container is running with CAP_DAC_OVERRIDE capability, we also check for Seccomp, as it's enough to prevent the exploitation in this case.", - "controlID": "C-0086", - "baseScore": 4.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation", - "Impact - Data access in container" - ] - } - ] - }, - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "long_description": "Containerd is a container runtime available as a daemon for Linux and Windows. A bug was found in containerd prior to versions 1.6.1, 1.5.10, and 1.4.12 where containers launched through containerd\u2019s CRI implementation on Linux with a specially-crafted image configuration could gain access to read-only copies of arbitrary files and directories on the host. This may bypass any policy-based enforcement on container setup (including a Kubernetes Pod Security Policy) and expose potentially sensitive information. This bug was fixed in containerd versions 1.6.1, 1.5.10, and 1.4.12. Users should update to these versions to resolve the issue.", - "test": "Checking containerd version to see if it is a vulnerable version (where the container runtime is containerd)", - "controlID": "C-0087", - "baseScore": 7.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "long_description": "The API server allows an aggregated API to redirect client traffic to any URL. 
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "test": "List the aggregated-API-server services that could potentially be used to redirect client traffic to any URL, if the API server version is vulnerable to CVE-2022-3172", - "controlID": "C-0089", - "baseScore": 3.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Kyverno to 1.8.5 or above", - "long_description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy. Image signature verification process is used to verify the integrity of the image and prevent the execution of malicious images. The verification process pulled the image manifest twice, once for verification and once for the actual execution. The verification process was bypassed by using a malicious image repository or MITM proxy to return a different manifest for the verification process. This vulnerability was fixed in Kyverno 1.8.5. This issue can be mitigated by using only trusted image repositories and by using a secure connection to the image repository. See C-0001 and C-0078 for limiting the use of trusted repositories.", - "test": "This control tests for vulnerable versions of Kyverno (between 1.8.3 and 1.8.4)", - "controlID": "C-0091", - "baseScore": 8.0, - "example": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0236", - "name": "Verify image signature", - "description": "Verifies the signature of each image with given public keys", - "long_description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "manual_test": "", - "references": [], - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0237", - "name": "Check if signature exists", - "description": "Ensures that all images contain some signature", - "long_description": "Verifies that each image is signed", - "remediation": "Replace the image with a signed image", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0001", - "C-0002", - "C-0005", - "C-0009", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0049", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0078", - "C-0079", -
"C-0081", - "C-0086", - "C-0087", - "C-0089", - "C-0091", - "C-0236", - "C-0237" - ] - }, - { - "name": "cis-v1.23-t1.0.1", - "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", - "attributes": { - "version": "v1.0.1", - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "1": { - "id": "1", - "name": "Control Plane Components", - "subSections": { - "1": { - "id": "1.1", - "name": "Control Plane Node Configuration Files", - "controlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - "C-0101", - "C-0102", - "C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112" - ] - }, - "2": { - "id": "1.2", - "name": "API Server", - "controlsIDs": [ - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143" - ] - }, - "3": { - "id": "1.3", - "name": "Controller Manager", - "controlsIDs": [ - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - "C-0149", - "C-0150" - ] - }, - "4": { - "id": "1.4", - "name": "Scheduler", - "controlsIDs": [ - "C-0151", - "C-0152" - ] - } - } - }, - "2": { - "name": "etcd", - "id": "2", - "controlsIDs": [ - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159" - ] - }, - "3": { - "name": "Control Plane Configuration", - "id": "3", - "subSections": { - "2": { - "name": "Logging", - "id": "3.2", - "controlsIDs": [ - "C-0160", - "C-0161" - ] - } - } - }, - "4": { - "name": "Worker Nodes", - "id": "4", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "4.1", - "controlsIDs": [ - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171" - ] - }, - "2": { - "name": "Kubelet", - "id": "4.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184" - ] - } - } - }, - "5": { - "name": "Policies", - "id": "5", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "5.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191" - ] - }, - "2": { - "name": "Pod Security Standards", - "id": "5.2", - "controlsIDs": [ - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204" - ] - }, - "3": { - "name": "Network Policies and CNI", - "id": "5.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "5.4", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "7": { - "name": "General Policies", - "id": "5.7", - "controlsIDs": [ - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "controlID": "C-0092", - "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "long_description": "The API server 
pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838561" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0093", - "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "long_description": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838563" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0094", - "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838564" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kube-controller-manager.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0095", - "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "long_description": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838566" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0096", - "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838568" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0097", - "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "long_description": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838570" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0098", - "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838571" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0099", - "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "long_description": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/manifests/etcd.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838573" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0100", - "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838574" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0101", - "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "long_description": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838576" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "NA", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0102", - "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %a /var/lib/etcd\n\n```\n Verify that the permissions are `700` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838577" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory has permissions of `755`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0103", - "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "long_description": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "manual_test": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nstat -c %U:%G /var/lib/etcd\n\n```\n Verify that the ownership is set to `etcd:etcd`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838579" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, etcd data directory ownership is set to `etcd:etcd`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0104", - "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "long_description": "The `admin.conf` is the administrator kubeconfig file defining various settings for the administration of the cluster. This file contains private key and respective certificate allowed to fully manage the cluster. You should restrict its file permissions to maintain the integrity and confidentiality of the file. The file should be readable and writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/admin.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838580" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, admin.conf has permissions of `600`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0105", - "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "long_description": "The `admin.conf` file contains the admin credentials for the cluster. You should set its file ownership to maintain the integrity and confidentiality of the file. The file should be owned by root:root.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/admin.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838584" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None.", - "default_value": "By default, `admin.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0106", - "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/scheduler.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838586" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0107", - "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "long_description": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/scheduler.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838587" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `scheduler.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0108", - "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the following command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838593" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0109", - "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "long_description": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/controller-manager.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838599" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `controller-manager.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0110", - "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "long_description": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. The directory and files should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nls -laR /etc/kubernetes/pki/\n\n```\n Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838604" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it, are set to be owned by the root user.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0111", - "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "long_description": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `600` or more restrictive to protect their integrity.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.crt\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838606" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the certificates used by Kubernetes are set to have permissions of `644`", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0112", - "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "long_description": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nls -laR /etc/kubernetes/pki/*.key\n\n```\n Verify that the permissions are `600`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126653/recommendations/1838608" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "None", - "default_value": "By default, the keys used by Kubernetes are set to have permissions of `600`", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0113", - "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the API server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. 
You should rely on authentication to authorize access and disallow anonymous requests.\n\n If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838609" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0114", - "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", - "description": "Do not use token based authentication.", - "long_description": "The token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--token-auth-file` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838611" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.", - "default_value": "By default, `--token-auth-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0115", - "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "long_description": "This admission controller rejects all net-new usage of the Service field externalIPs. This feature is very powerful (allows network traffic interception) and not well controlled by policy. When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects. Existing uses of externalIPs are not affected, and users may remove values from externalIPs on existing Service objects.\n\n Most users do not need this feature at all, and cluster admins should consider disabling it. 
Clusters that do need to use this feature should consider using some custom policy to manage usage of it.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--DenyServiceExternalIPs` argument does not exist.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838614" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", - "default_value": "By default, `DenyServiceExternalIPs` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0116", - "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "description": "Enable certificate based kubelet authentication.", - "long_description": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838624" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, certificate-based kubelet authentication is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0117", - "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "description": "Verify kubelet's certificate before establishing connection.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. 
By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838634" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--kubelet-certificate-authority` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0118", - "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not always authorize all requests.", - "long_description": "The API Server can be configured to allow all requests. This mode should not be used on any production cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838639" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Only authorized requests will be served.", - "default_value": "By default, `AlwaysAllow` is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0119", - "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "long_description": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `Node`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838641" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, `Node` authorization is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0120", - "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", - "description": "Turn on Role Based Access Control.", - "long_description": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. 
It is recommended to use the RBAC authorization mode.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--authorization-mode` argument exists and is set to a value to include `RBAC`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838642" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "When RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", - "default_value": "By default, `RBAC` authorization is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0121", - "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", - "description": "Limit the rate at which the API server accepts requests.", - "long_description": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. Hence, it is recommended to limit the rate of events that the API server will accept.\n\n Note: This is an Alpha feature in the Kubernetes 1.15 release.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838644" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "You need to carefully tune in limits as per your environment.", - "default_value": "By default, `EventRateLimit` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0122", - "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", - "description": "Do not allow all requests.", - "long_description": "Setting admission control plugin `AlwaysAdmit` allows all requests and does not filter any requests.\n\n The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. 
Its behavior was equivalent to turning off all admission controllers.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838647" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Only requests explicitly allowed by the admission control plugins would be served.", - "default_value": "`AlwaysAdmit` is not in the list of default admission plugins.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0123", - "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", - "description": "Always pull images.", - "long_description": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image\u2019s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838649" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.", - "default_value": "By default, `AlwaysPullImages` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0124", - "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", - "long_description": "SecurityContextDeny can be used to provide a layer of security for clusters which do not have PodSecurityPolicies enabled.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `SecurityContextDeny`, if `PodSecurityPolicy` is not included.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838650" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies.", - "default_value": "By default, `SecurityContextDeny` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0125", - "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", - "description": "Automate service accounts management.", - "long_description": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838652" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "None.", - "default_value": "By default, `ServiceAccount` is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0126", - "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", - "description": "Reject creating objects in a namespace that is undergoing termination.", - "long_description": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating the new objects. 
This is recommended to enforce the integrity of the namespace termination process and also for the availability of the newer objects.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838653" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "None", - "default_value": "By default, `NamespaceLifecycle` is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0127", - "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "long_description": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838655" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `NodeRestriction` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0128", - "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", - "description": "Do not disable the secure port.", - "long_description": "The secure port is used to serve https with authentication and authorization. 
If you disable it, no https traffic is served and all traffic is served unencrypted.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--secure-port` argument is either not set or is set to an integer value between 1 and 65535.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838659" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "You need to set the API Server up with the right TLS certificates.", - "default_value": "By default, port 6443 is used as the secure port.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0129", - "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838660" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0130", - "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "long_description": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected system by individual users, administrators or other components of the system. Even though currently, Kubernetes provides only basic audit capabilities, it should be enabled. 
You can enable it by setting an appropriate audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-path` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838662" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0131", - "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "description": "Retain the logs for at least 30 days or as appropriate.", - "long_description": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838664" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0132", - "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "description": "Retain 10 or an appropriate number of old log files.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. 
For example, if you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838665" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0133", - "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "long_description": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set a file size of 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838666" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, auditing is not enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0134", - "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", - "description": "Set global request timeout for API server requests as appropriate.", - "long_description": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds which might be problematic on slower connections making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. But, setting this timeout limit to be too large can exhaust the API server resources making it prone to Denial-of-Service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838667" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--request-timeout` is set to 60 seconds.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0135", - "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", - "description": "Validate service account before validating token.", - "long_description": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. This is an example of time of check to time of use security issue.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that if the `--service-account-lookup` argument exists it is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838668" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `--service-account-lookup` argument is set to `true`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0136", - "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "long_description": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--service-account-key-file` argument exists and is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838669" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-key-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0137", - "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838670" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0138", - "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and they are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838671" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0139", - "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", - "description": "Setup TLS connection on the API server.", - "long_description": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--client-ca-file` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838672" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0140", - "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a SSL Certificate Authority file.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--etcd-cafile` argument exists and it is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838673" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "TLS and client certificate authentication must be configured for etcd.", - "default_value": "By default, `--etcd-cafile` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0141", - "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "description": "Encrypt etcd key-value store.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--encryption-provider-config` argument is set to a `EncryptionConfig` file. Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered especially any secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838674" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, `--encryption-provider-config` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0142", - "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "long_description": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. 
Verify that `aescbc`, `kms` or `secretbox` is set as the encryption provider for all the desired `resources`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838675" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no encryption provider is set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0143", - "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure below.", - "references": [ - "https://workbench.cisecurity.org/sections/1126663/recommendations/1838676" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0144", - "name": "CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", - "description": "Activate garbage collector on pod termination, as appropriate.", - "long_description": "Garbage collection is important to ensure sufficient resource availability and avoiding degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods which might be too high for your system to sustain. 
Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838677" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "By default, `--terminated-pod-gc-threshold` is set to `12500`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0145", - "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838678" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0146", - "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "description": "Use individual service account credentials for each controller.", - "long_description": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. 
When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--use-service-account-credentials` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838679" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", - "default_value": "By default, `--use-service-account-credentials` is set to false.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0147", - "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "long_description": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. 
The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--service-account-private-key-file` argument is set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838680" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", - "default_value": "By default, `--service-account-private-key-file` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0148", - "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "long_description": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could leave the connection subject to man-in-the-middle attacks.\n\n Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file.\n\n \n```\n--root-ca-file=\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838681" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "You need to set up and maintain a root certificate authority file.", - "default_value": "By default, `--root-ca-file` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0149", - "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation on controller-manager.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. 
In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838682" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0150", - "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "long_description": "The Controller Manager API service which runs on port 10252/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-controller-manager\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126669/recommendations/1838683" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0151", - "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", - "description": "Disable profiling, if not needed.", - "long_description": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. 
If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--profiling` argument is set to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838684" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Profiling information would not be available.", - "default_value": "By default, profiling is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0152", - "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "long_description": "The Scheduler API service which runs on port 10251/TCP by default is used for health and metrics information and is available without authentication or encryption. As such it should only be bound to a localhost interface, to minimize the cluster's attack surface", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "manual_test": "Run the following command on the Control Plane node:\n\n \n```\nps -ef | grep kube-scheduler\n\n```\n Verify that the `--bind-address` argument is set to 127.0.0.1", - "references": [ - "https://workbench.cisecurity.org/sections/1126670/recommendations/1838685" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "None", - "default_value": "By default, the `--bind-address` parameter is set to 0.0.0.0", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0153", - "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", - "description": "Configure TLS encryption for the etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. 
These objects are sensitive in nature and should be encrypted in transit.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838562" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Client connections only over TLS would be served.", - "default_value": "By default, TLS encryption is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0154", - "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", - "description": "Enable client authentication on etcd service.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--client-cert-auth` argument is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838565" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "All clients attempting to access the etcd server will require a valid client certificate.", - "default_value": "By default, the etcd service can be queried by unauthenticated clients.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0155", - "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", - "description": "Do not use self-signed certificates for TLS.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. 
You should enable the client authentication via valid certificates to secure the access to the etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--auto-tls` argument exists, it is not set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838567" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Clients will not be able to use self-signed certificates for TLS.", - "default_value": "By default, `--auto-tls` is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0156", - "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-cert-file=\n--peer-key-file=\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate.\n\n **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838569" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "etcd cluster peers would need to set up TLS for their communication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0157", - "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", - "description": "etcd should be configured for peer authentication.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", - "manual_test": "Run the following command on the etcd server node: ```ps -ef | grep etcd``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838572" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0158", - "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "long_description": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", - "manual_test": "Run the following command on the etcd server node:\n\n \n```\nps -ef | grep etcd\n\n```\n Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`.\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838575" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", - "default_value": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0159", - "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "long_description": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only.\n\n Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. 
As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", - "manual_test": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep etcd\n\n```\n Note the file referenced by the `--trusted-ca-file` argument.\n\n Run the following command on the master node:\n\n \n```\nps -ef | grep apiserver\n\n```\n Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", - "references": [ - "https://workbench.cisecurity.org/sections/1126654/recommendations/1838578" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", - "default_value": "By default, no etcd certificate is created and used.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0160", - "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "long_description": "Logging is an important detective control for all systems, to detect potential unauthorised access.", - "remediation": "Create an audit policy file for your cluster.", - "manual_test": "Run the following command on one of the cluster master nodes:\n\n \n```\nps -ef | grep kube-apiserver\n\n```\n Verify that the `--audit-policy-file` is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838582" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating excessively large volumes of log information, as this could impact the availability of the cluster nodes.", - "default_value": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0161", - "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", - "description": "Ensure that the audit policy created for the cluster covers key security concerns.", - "long_description": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", - "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", - "manual_test": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas:\n\n * Access to Secrets managed by the cluster. 
Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data.\n* Modification of `pod` and `deployment` objects.\n* Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.\n\n For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", - "references": [ - "https://workbench.cisecurity.org/sections/1126657/recommendations/1838583" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Increasing audit logging will consume resources on the nodes or other log destination.", - "default_value": "By default Kubernetes clusters do not log audit information.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0162", - "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838585" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the `kubelet` service file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0163", - "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "long_description": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838589" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet` service file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0164", - "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "long_description": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a \n\n```\n Verify that a file is specified and that it exists with permissions of `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838598" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, the proxy file has permissions of `640`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0165", - "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "Find the kubeconfig file being used by `kube-proxy` by running the following command:\n\n \n```\nps -ef | grep kube-proxy\n\n```\n If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter.\n\n To perform the audit:\n\n Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838603" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `proxy` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0166", - "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %a /etc/kubernetes/kubelet.conf\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838607" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file has permissions of `600`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0167", - "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "long_description": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet.conf\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838613" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, `kubelet.conf` file ownership is set to `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0168", - "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. 
You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`.\n\n \n```\nchmod 600 \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %a \n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838618" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no `--client-ca-file` is specified.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0169", - "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "long_description": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "manual_test": "Run the following command:\n\n \n```\nps -ef | grep kubelet\n\n```\n Find the file specified by the `--client-ca-file` argument.\n\n Run the following command:\n\n \n```\nstat -c %U:%G \n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838619" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, no `--client-ca-file` is specified.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0170", - "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nstat -c %a /var/lib/kubelet/config.yaml\n\n```\n Verify that the permissions are `600` or more restrictive.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838620" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be owned by root:root.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /var/lib/kubelet/config.yaml\n\n```", - "manual_test": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nstat -c %U:%G /var/lib/kubelet/config.yaml\n\n```\n Verify that the ownership is set to `root:root`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126659/recommendations/1838629" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "By default, the `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`.\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--anonymous-auth` argument is set to `false`.\n\n This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838638" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "By default, anonymous access is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`.\n\n It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838640" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "By default, `--authorization-mode` argument is set to `AlwaysAllow`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. 
Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file.\n\n If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838643" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "By default, `--client-ca-file` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838645" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "By default, `--read-only-port` is set to `10255/TCP`. 
However, if a config file is specified by `--config` the default value for `readOnlyPort` is 0.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`.\n\n If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838646" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "By default, `--streaming-connection-idle-timeout` is set to 4 hours.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--protect-kernel-defaults` argument is set to `true`.\n\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838648" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "By default, `--protect-kernel-defaults` is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838651" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "By default, `--make-iptables-util-chains` argument is set to `true`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. 
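For reference, a minimal sketch of the config-file form of the kubelet remediations above (CIS 4.2.4 through 4.2.7). The file path and the timeout value are assumptions that vary by distribution:

```
# Hypothetical kubelet config file (e.g. /var/lib/kubelet/config.yaml) -- adjust path and values to your distribution
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
readOnlyPort: 0                        # CIS 4.2.4: disable the unauthenticated read-only port
streamingConnectionIdleTimeout: 5m     # CIS 4.2.5: keep a non-zero idle timeout on streaming connections
protectKernelDefaults: true            # CIS 4.2.6: refuse to override tuned kernel defaults
makeIPTablesUtilChains: true           # CIS 4.2.7: let the kubelet manage its iptables chains
```

As with the command-line variants, the kubelet service has to be restarted for the file changes to take effect.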
Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", - "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838654" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. In these environments, this recommendation should not apply.", - "default_value": "By default, `--hostname-override` argument is not set.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Review the value set for the `--event-qps` argument and determine whether this has been set to an appropriate level for the cluster. The value of `0` can be used to ensure that all events are captured.\n\n If the `--event-qps` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838656" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. 
The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "By default, `--event-qps` argument is set to `5`.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0181", - "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "description": "Setup TLS connection on the Kubelets.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", - "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the --tls-cert-file and --tls-private-key-file arguments exist and they are set as appropriate.\n\n If these arguments are not present, check that there is a Kubelet config specified by --config and that it contains appropriate settings for tlsCertFile and tlsPrivateKeyFile.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838657" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0182", - "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates and thus addresses availability in the CIA security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to take care of rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7)", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the `--rotate-certificates` argument is not present, or is set to `true`.\n\n If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838658" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet client certificate rotation is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates and thus addresses availability in the CIA security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", - "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "Ignore this check if serverTLSBootstrap is true in the kubelet config file or if the --rotate-server-certificates parameter is set on kubelet\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838661" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "By default, kubelet server certificate rotation is enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0184", - "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", - "long_description": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", - "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "manual_test": "The set of cryptographic ciphers currently considered secure is the following:\n\n * `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`\n* `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`\n* `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_256_GCM_SHA384`\n* `TLS_RSA_WITH_AES_128_GCM_SHA256`\n\n Run the following command on each node:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set.\n\n If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `TLSCipherSuites:` to only include values from this set.", - "references": [ - "https://workbench.cisecurity.org/sections/1126668/recommendations/1838663" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", - "default_value": "By default the Kubernetes API server supports a wide range of TLS ciphers", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n \n```\nkubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name\n\n```\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838588" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. 
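As a companion to CIS 4.2.10 through 4.2.13 above, a hedged sketch of the equivalent Kubelet config file settings; the certificate paths are placeholders and the cipher list mirrors the set quoted in the manual test:

```
# Hypothetical kubelet config fragment -- certificate paths are illustrative only
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
tlsCertFile: /etc/kubernetes/pki/kubelet.crt        # CIS 4.2.10: serving certificate
tlsPrivateKeyFile: /etc/kubernetes/pki/kubelet.key  # CIS 4.2.10: corresponding private key
rotateCertificates: true                            # CIS 4.2.11: client certificate rotation
serverTLSBootstrap: true                            # CIS 4.2.12: request/rotate the serving certificate via the API server
tlsCipherSuites:                                    # CIS 4.2.13: strong ciphers only (subset of the documented set)
  - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
  - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
```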
Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838590" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:expand-controller expand-controller ServiceAccount kube-systemsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-systemsystem:controller:namespace-controller namespace-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:kube-controller-manager system:kube-controller-manager User ```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
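To illustrate CIS 5.1.2 and 5.1.3, a sketch of a namespaced Role that lists resources and verbs explicitly instead of using "*" and grants no access to secrets; the role and namespace names are illustrative:

```
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: app-reader        # illustrative name
  namespace: my-app       # illustrative namespace
rules:
  - apiGroups: [""]
    resources: ["pods", "configmaps"]   # explicit resources, no "*" and no "secrets"
    verbs: ["get", "list", "watch"]     # explicit verbs, no "*"
```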
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838591" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838592" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ```CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACEcluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-systemsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-systemsystem:controller:job-controller job-controller ServiceAccount kube-systemsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-systemsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-systemsystem:controller:replication-controller replication-controller ServiceAccount kube-systemsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be 
created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838594" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service account tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838595" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods mounted without service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. 
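A minimal sketch of the remediations for CIS 5.1.5 and 5.1.6: the default service account stops automounting its token, and only a dedicated, explicitly created service account opts back in for a workload that genuinely needs API access. All names and the image are illustrative:

```
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: my-app                    # illustrative namespace
automountServiceAccountToken: false    # default SA no longer hands out a token
---
apiVersion: v1
kind: Pod
metadata:
  name: api-client
  namespace: my-app
spec:
  serviceAccountName: api-client-sa    # dedicated service account created for this workload
  automountServiceAccountToken: true   # opt in only where API access is required
  containers:
    - name: app
      image: registry.example.com/app:1.0   # illustrative image
```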
Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://workbench.cisecurity.org/sections/1126661/recommendations/1838597" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0192", - "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", - "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", - "long_description": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", - "manual_test": "Pod Security Admission is enabled by default on all clusters using Kubernetes 1.23 or higher. 
To assess what controls, if any, are in place using this mechanism, review the namespaces in the cluster to see if the [required labels](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces) have been applied\n\n \n```\nkubectl get namespaces -o yaml\n\n```\n To confirm if any external policy control system is in use, review the cluster for the presence of `validatingadmissionwebhook` and `mutatingadmissionwebhook` objects.\n\n \n```\nkubectl get validatingwebhookconfigurations\n\n```\n \n```\nkubectl get mutatingwebhookconfigurations\n\n```", - "test": "Checks that every namespace has pod security admission enabled, or that there are external policies applied for namespaced resources (validating/mutating webhooks)", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838600" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", - "default_value": "By default, Pod Security Admission is enabled but no policies are in place.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0193", - "name": "CIS-5.2.2 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one admission control policy defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of privileged containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838601" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of privileged containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0194", - "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
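As a sketch of one way to satisfy CIS 5.2.1 with the in-built Pod Security Admission controller, the namespace below enforces the `restricted` level; the namespace name is illustrative and the chosen level is an assumption:

```
apiVersion: v1
kind: Namespace
metadata:
  name: my-app                                        # illustrative namespace
  labels:
    pod-security.kubernetes.io/enforce: restricted    # reject non-compliant pods
    pod-security.kubernetes.io/audit: restricted
    pod-security.kubernetes.io/warn: restricted
```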
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostPID` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838602" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPID` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0195", - "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace.\n\n If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostIPC` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838605" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostIPC` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0196", - "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one admission control policy defined which does not permit containers to share the host network namespace.\n\n If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - 
"remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostNetwork` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838610" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostNetwork` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0197", - "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of conatiners with `.spec.allowPrivilegeEscalation`set to `true`.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which allow privilege escalation.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838612" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on contained process ability to escalate privileges, within the context of the container.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0198", - "name": "CIS-5.2.7 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user. 
Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one admission control policy defined which does not permit root containers.\n\n If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838615" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, there are no restrictions on the use of root containers and if a User is not specified in the image, the container will run as root.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0199", - "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", - "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default this can include potentially dangerous capabilities. With Docker as the container runtime the NET\\_RAW capability is enabled which may be misused by malicious containers.\n\n Ideally, all containers should drop this capability.\n\n There should be at least one admission control policy defined which does not permit containers with the NET\\_RAW capability.\n\n If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838617" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods with containers which run with the NET\\_RAW capability will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0200", - "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. 
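For orientation, a sketch of the pod shape that the admission policies in CIS 5.2.2 through 5.2.8 are meant to allow: no host namespaces, no privilege escalation, non-root, and all capabilities dropped. The pod name and image are illustrative:

```
apiVersion: v1
kind: Pod
metadata:
  name: hardened-example          # illustrative
spec:
  hostPID: false
  hostIPC: false
  hostNetwork: false
  containers:
    - name: app
      image: registry.example.com/app:1.0   # illustrative image
      securityContext:
        privileged: false
        allowPrivilegeEscalation: false
        runAsNonRoot: true
        capabilities:
          drop: ["ALL"]           # drops NET_RAW along with everything else
```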
Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that policies are present which prevent `allowedCapabilities` from being set to anything other than an empty array.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838621" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, there are no restrictions on adding capabilities to containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0201", - "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that at least one policy requires that capabilities are dropped by all containers.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838622" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, there are no restrictions on the creation of containers with additional capabilities", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0202", - "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", - "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", - "long_description": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. 
As per the Kubernetes documentation, this provides \"privileged access\" to the Windows node.\n\n Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers.\n\n If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of `hostProcess` containers", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838623" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostProcess` containers.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0203", - "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", - "description": "Do not generally admit containers which make use of `hostPath` volumes.", - "long_description": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem.\n\n There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.\n\n If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers with `hostPath` volumes.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838625" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Pods defined which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the creation of `hostPath` volumes.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0204", - "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", - "description": "Do not generally permit containers which require the use of HostPorts.", - "long_description": "Host ports connect containers directly to the host's network. 
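Continuing the same theme for CIS 5.2.12 and 5.2.13, a sketch of a pod that avoids both `hostPath` volumes and `hostPort` bindings, using an ephemeral `emptyDir` volume and relying on a Service for exposure; names and the image are illustrative:

```
apiVersion: v1
kind: Pod
metadata:
  name: no-host-access            # illustrative
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0   # illustrative image
      ports:
        - containerPort: 8080     # no hostPort; expose via a Service instead
      volumeMounts:
        - name: scratch
          mountPath: /var/cache/app
  volumes:
    - name: scratch
      emptyDir: {}                # ephemeral volume instead of a hostPath mount
```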
This can bypass controls such as network policy.\n\n There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts.\n\n If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", - "manual_test": "List the policies in use for each namespace in the cluster, ensure that each policy disallows the admission of containers which have `hostPort` sections.", - "references": [ - "https://workbench.cisecurity.org/sections/1126662/recommendations/1838626" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", - "default_value": "By default, there are no restrictions on the use of HostPorts.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838627" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "This will depend on the CNI plugin in use.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. 
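A minimal sketch of a default-deny `NetworkPolicy` that is commonly used as the starting point for CIS 5.3.2, assuming the CNI in use actually enforces policies (5.3.1); the namespace is illustrative:

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-app        # illustrative namespace
spec:
  podSelector: {}          # selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress               # no rules listed, so all ingress and egress is denied
```

Allow-rules for legitimate traffic are then layered on top so that workloads are not silently blocked.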
However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace.", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl --all-namespaces get networkpolicy\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://workbench.cisecurity.org/sections/1126664/recommendations/1838628" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", - "default_value": "By default, network policies are not created.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838630" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.4.2 Consider external secret storage", - "controlID": "C-0208", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. 
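To illustrate CIS 5.4.1, a sketch of a pod that mounts a Secret as a read-only file volume instead of exposing it through environment variables; the Secret name, mount path and image are illustrative:

```
apiVersion: v1
kind: Pod
metadata:
  name: secret-as-file            # illustrative
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0   # illustrative image
      volumeMounts:
        - name: db-credentials
          mountPath: /etc/secrets # application reads files here rather than env vars
          readOnly: true
  volumes:
    - name: db-credentials
      secret:
        secretName: db-credentials          # illustrative Secret
```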
Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "manual_test": "Review your secrets management implementation.", - "test": "Checking encryption configuration to see if secrets are managed externally by kms using aws, azure, or akeyless vault", - "references": [ - "https://workbench.cisecurity.org/sections/1126665/recommendations/1838631" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838633" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "By default, Kubernetes starts with two initial namespaces: 1. `default` - The default namespace for objects with no other namespace2. `kube-system` - The namespace for objects created by the Kubernetes system3. `kube-node-lease` - Namespace used for node heartbeats4. `kube-public` - Namespace used for public information in a cluster", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", - "controlID": "C-0210", - "description": "Enable `docker/default` seccomp profile in your pod definitions.", - "long_description": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", - "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. 
An example is as below:\n\n \n```\n securityContext:\n   seccompProfile:\n     type: RuntimeDefault\n\n```", - "manual_test": "Review the pod definitions in your cluster. Each pod definition should include a section similar to the one below:\n\n \n```\n securityContext:\n   seccompProfile:\n     type: RuntimeDefault\n\n```", - "test": "Checks if a seccomp profile of type RuntimeDefault is defined in the security context at the workload or container level", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838635" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", - "default_value": "By default, the seccomp profile is set to `unconfined` which means that no seccomp profiles are enabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc.) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Checks that pod and container security context fields are set according to the recommendations in the CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.7.4 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
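One way to audit the seccomp recommendation in CIS-5.7.2 above across a whole cluster is to filter pod specs for the `RuntimeDefault` profile; a sketch, assuming `kubectl` and `jq` are available, that only inspects the pod-level `securityContext` (container-level overrides would need an analogous check):

```
# Print pods whose pod-level securityContext does not request the RuntimeDefault seccomp profile
kubectl get pods -A -o json | jq -r '
  .items[]
  | select((.spec.securityContext.seccompProfile.type // "") != "RuntimeDefault")
  | "\(.metadata.namespace)/\(.metadata.name)"'
```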
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838637" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specific on object creation, the `default` namespace will be used", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0092", - "C-0093", - "C-0094", - "C-0095", - "C-0096", - "C-0097", - "C-0098", - "C-0099", - "C-0100", - "C-0101", - "C-0102", - "C-0103", - "C-0104", - "C-0105", - "C-0106", - "C-0107", - "C-0108", - "C-0109", - "C-0110", - "C-0111", - "C-0112", - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143", - "C-0144", - "C-0145", - "C-0146", - "C-0147", - "C-0148", - "C-0149", - "C-0150", - "C-0151", - "C-0152", - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159", - "C-0160", - "C-0161", - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204", - "C-0205", - "C-0206", - "C-0207", - "C-0208", - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] - }, - { - "name": "cis-eks-t1.2.0", - "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9681", - "attributes": { - "version": "v1.2.0", - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "subSections": { - "2": { - "name": "Control Plane Configuration", - "id": "2", - "subSections": { - "1": { - "name": "Logging", - "id": "2.1", - "controlsIDs": [ - "C-0067" - ] - } - } - }, - "3": { - "name": "Worker Nodes", - "id": "3", - "subSections": { - "1": { - "name": "Worker Node Configuration Files", - "id": "3.1", - "controlsIDs": [ - "C-0167", - "C-0171", - "C-0235", - "C-0238" - ] - }, - "2": { - "name": "Kubelet", - "id": "3.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - 
"C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183" - ] - }, - "3": { - "name": "Container Optimized OS", - "id": "3.3", - "controlsIDs": [ - "C-0226" - ] - } - } - }, - "4": { - "name": "Policies", - "id": "4", - "subSections": { - "1": { - "name": "RBAC and Service Accounts", - "id": "4.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191" - ] - }, - "2": { - "name": "Pod Security Policies", - "id": "4.2", - "controlsIDs": [ - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220" - ] - }, - "3": { - "name": "CNI Plugin", - "id": "4.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "name": "Secrets Management", - "id": "4.4", - "controlsIDs": [ - "C-0207", - "C-0234" - ] - }, - "6": { - "name": "General Policies", - "id": "4.6", - "controlsIDs": [ - "C-0209", - "C-0211", - "C-0212" - ] - } - } - }, - "5": { - "name": "Managed services", - "id": "5", - "subSections": { - "1": { - "name": "Image Registry and Image Scanning", - "id": "5.1", - "controlsIDs": [ - "C-0221", - "C-0223", - "C-0078" - ] - }, - "2": { - "name": "Identity and Access Management (IAM)", - "id": "5.2", - "controlsIDs": [ - "C-0225" - ] - }, - "3": { - "name": "AWS EKS Key Management Service", - "id": "5.3", - "controlsIDs": [ - "C-0066" - ] - }, - "4": { - "name": "Cluster Networking", - "id": "5.4", - "controlsIDs": [ - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231" - ] - }, - "5": { - "name": "Authentication and Authorization", - "id": "5.5", - "controlsIDs": [ - "C-0232" - ] - }, - "6": { - "name": "Other Cluster Configurations", - "id": "5.6", - "controlsIDs": [ - "C-0233" - ] - } - } - } - }, - "version": null, - "controls": [ - { - "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", - "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", - "long_description": "Kubernetes can store secrets that pods can access via a mounted volume. Today, Kubernetes secrets are stored with Base64 encoding, but encrypting is the recommended approach. Amazon EKS clusters version 1.13 and higher support the capability of encrypting your Kubernetes secrets using AWS Key Management Service (KMS) Customer Managed Keys (CMK). The only requirement is to enable the encryption provider support during EKS cluster creation.\n\n Use AWS Key Management Service (KMS) keys to provide envelope encryption of Kubernetes secrets stored in Amazon EKS. Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.\n\n Application-layer Secrets Encryption provides an additional layer of security for sensitive data, such as user defined Secrets and Secrets required for the operation of the cluster, such as service account keys, which are all stored in etcd.\n\n Using this functionality, you can use a key, that you manage in AWS KMS, to encrypt data at the application layer. 
This protects against attackers in the event that they manage to gain access to etcd.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [], - "manual_test": "Using the etcdctl commandline, read that secret out of etcd:\n\n \n```\nETCDCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C\n\n```\n where [...] must be the additional arguments for connecting to the etcd server.\n\n Verify the stored secret is prefixed with k8s:enc:aescbc:v1: which indicates the aescbc provider has encrypted the resulting data.", - "references": [ - "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/" - ], - "impact_statement": "", - "default_value": "By default secrets created using the Kubernetes API are stored in *tmpfs* and are encrypted at rest." - }, - { - "name": "CIS-2.1.1 Enable audit Logs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", - "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", - "long_description": "Audit logs enable visibility into all API server requests from authentic and anonymous sources. 
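Besides the `etcdctl` audit quoted above for CIS-5.3.1, the envelope-encryption setting of an existing EKS cluster can be read from the management API; a sketch, assuming a configured AWS CLI, where `my-cluster` and `us-east-1` are placeholders:

```
# A non-empty encryptionConfig covering the "secrets" resource indicates KMS envelope encryption is enabled
aws eks describe-cluster --name my-cluster --region us-east-1 \
  --query 'cluster.encryptionConfig'
```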
Stored log data can be analyzed manually or with tools to identify and understand anomalous or negative activity and lead to intelligent remediations.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "name": "CIS-5.1.4 Minimize Container Registries to only those approved", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Use approved container registries.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "long_description": "Allowing unrestricted access to external container registries provides the opportunity for malicious or unapproved containers to be deployed into the cluster. Allowlisting only approved container registries reduces this risk.", - "test": "Checks if image is from allowed listed registry.", - "controlID": "C-0078", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [], - "references": [ - "https://aws.amazon.com/blogs/opensource/using-open-policy-agent-on-amazon-eks/" - ], - "impact_statement": "All container images to be deployed to the cluster must be hosted within an approved container image registry.", - "default_value": "" - }, - { - "controlID": "C-0167", - "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", - "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "long_description": "The kubeconfig file for `kubelet` controls various parameters for the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file ownership:\n\n \n```\nstat -c %U:%G /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's ownership. 
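The CIS-2.1.1 logging remediation above can also be verified from the CLI; a sketch, assuming a configured AWS CLI, with placeholder cluster and region names:

```
# All five control plane log types should report "enabled": true
aws eks describe-cluster --name my-cluster --region us-east-1 \
  --query 'cluster.logging.clusterLogging'
```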
Verify that the ownership is set to `root:root`.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0171", - "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %U:%G /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's ownership. Verify that the ownership is set to `root:root`", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0172", - "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", - "description": "Disable anonymous requests to the Kubelet server.", - "long_description": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
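The ownership audits in CIS-3.1.2 and CIS-3.1.4 above can be combined into one step on each worker node; a sketch that derives both paths from the running kubelet process, assuming the flags are passed as `--kubeconfig`/`--config` (adjust if your node uses different arguments):

```
# Run on a worker node; both files should be owned by root:root
KUBECONFIG_PATH=$(ps -ef | sed -n 's/.*kubelet.*--kubeconfig[= ]\([^ ]*\).*/\1/p' | head -n1)
CONFIG_PATH=$(ps -ef | sed -n 's/.*kubelet.*--config[= ]\([^ ]*\).*/\1/p' | head -n1)
stat -c '%U:%G %n' "$KUBECONFIG_PATH" "$CONFIG_PATH"
```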
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the[Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Anonymous Authentication is not enabled. This may be configured as a command line argument to the kubelet service with `--anonymous-auth=false` or in the kubelet configuration file via `\"authentication\": { \"anonymous\": { \"enabled\": false }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with `kubectl` on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Anonymous Authentication is not enabled checking that `\"authentication\": { \"anonymous\": { \"enabled\": false }` is in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Anonymous requests will be rejected.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0173", - "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "description": "Do not allow all requests. Enable explicit authorization.", - "long_description": "Kubelets can be configured to allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. 
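Audit Method 2 for CIS-3.2.1 above can be narrowed to the relevant field with `jq`; a sketch, assuming `kubectl proxy` is already running on port 8080 as described and `my-node-name` is a placeholder:

```
export NODE_NAME=my-node-name
curl -sSL "http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz" \
  | jq '.kubeletconfig.authentication.anonymous.enabled'
# Expected output: false
```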
Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that Webhook Authentication is enabled. This may be enabled as a command line argument to the kubelet service with `--authentication-token-webhook` or in the kubelet configuration file via `\"authentication\": { \"webhook\": { \"enabled\": true } }`.\n\n Verify that the Authorization Mode is set to `WebHook`. This may be set as a command line argument to the kubelet service with `--authorization-mode=Webhook` or in the configuration file via `\"authorization\": { \"mode\": \"Webhook }`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that Webhook Authentication is enabled with `\"authentication\": { \"webhook\": { \"enabled\": true } }` in the API response.\n\n Verify that the Authorization Mode is set to `WebHook` with `\"authorization\": { \"mode\": \"Webhook }` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Unauthorized requests will be denied.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0174", - "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", - "description": "Enable Kubelet authentication using certificates.", - "long_description": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet\u2019s port-forwarding functionality. These connections terminate at the kubelet\u2019s HTTPS endpoint. By default, the apiserver does not verify the kubelet\u2019s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", - "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. 
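For the CIS-3.2.2 audit above, the same settings can be read straight from the kubelet config file on a worker node; a sketch, assuming `jq` is installed and the EKS default path used elsewhere in this section:

```
# Webhook authentication should be true and the authorization mode should be "Webhook"
jq '{webhookAuthn: .authentication.webhook.enabled, authorizationMode: .authorization.mode}' \
  /etc/kubernetes/kubelet/kubelet-config.json
```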
Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Kubelets can accept configuration via a configuration file and in some cases via command line arguments. It is important to note that parameters provided as command line arguments will override their counterpart parameters in the configuration file (see `--config` details in the [Kubelet CLI Reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info, where you can also find out which configuration parameters can be supplied as a command line argument).\n\n With this in mind, it is important to check for the existence of command line arguments as well as configuration file entries when auditing Kubelet configuration.\n\n Firstly, SSH to each node and execute the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active Kubelet process, from which we can see the command line arguments provided to the process. Also note the location of the configuration file, provided with the `--config` argument, as this will be needed to verify configuration. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Verify that a client certificate authority file is configured. This may be configured using a command line argument to the kubelet service with `--client-ca-file` or in the kubelet configuration file via `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"`.\n\n **Audit Method 2:**\n\n It is also possible to review the running configuration of a Kubelet via the /configz endpoint of the Kubernetes API. This can be achieved using `kubectl` to proxy your requests to the API.\n\n Discover all nodes in your cluster by running the following command:\n\n \n```\nkubectl get nodes\n\n```\n Next, initiate a proxy with kubectl on a local port of your choice. 
In this example we will use 8080:\n\n \n```\nkubectl proxy --port=8080\n\n```\n With this running, in a separate terminal run the following command for each node:\n\n \n```\nexport NODE_NAME=my-node-name\ncurl http://localhost:8080/api/v1/nodes/${NODE_NAME}/proxy/configz \n\n```\n The curl command will return the API response which will be a JSON formatted string representing the Kubelet configuration.\n\n Verify that a client certificate authority file is configured with `\"authentication\": { \"x509\": {\"clientCAFile\": } }\"` in the API response.", - "references": [ - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", - "https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication", - "https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "You require TLS to be configured on apiserver as well as kubelets.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0175", - "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", - "description": "Disable the read-only port.", - "long_description": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", - "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `0`.\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `--read-only-port` argument exists and is set to `0`.\n\n If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. 
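The two places CIS-3.2.4 above says to look (command line first, then config file) can be checked on a node with a couple of one-liners; a sketch, assuming the EKS default config path:

```
# Either the flag or the config entry should resolve to 0
ps -ef | grep '[k]ubelet' | grep -o -- '--read-only-port=[0-9]*'
grep -o '"readOnlyPort": *[0-9]*' /etc/kubernetes/kubelet/kubelet-config.json
```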
Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0176", - "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "description": "Do not disable timeouts on streaming connections.", - "long_description": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports.\n\n **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to the relevant node:\n\n Run the following command on each node to find the running kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the command line for the process includes the argument `streaming-connection-idle-timeout` verify that it is not set to 0.\n\n If the `streaming-connection-idle-timeout` argument is not present in the output of the above command, refer instead to the `config` argument that specifies the location of the Kubelet config file e.g. 
`--config /etc/kubernetes/kubelet/kubelet-config.json`.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `streamingConnectionIdleTimeout` argument is not set to `0`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":\"4h0m0s\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/pull/18552" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Long-lived connections could be interrupted.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0177", - "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "long_description": "Kernel parameters are usually tuned and hardened by the system administrators before putting the systems into production. These parameters protect the kernel and the system. Your kubelet kernel defaults that rely on such parameters should be appropriately set to match the desired secured system state. 
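The CIS-3.2.5 audit above can likewise be reduced to two quick checks on a node; a sketch, assuming `jq` and the EKS default config path, where any non-zero duration (for example `4h0m0s`) passes:

```
ps -ef | grep '[k]ubelet' | grep -o -- '--streaming-connection-idle-timeout=[^ ]*'
jq '.streamingConnectionIdleTimeout' /etc/kubernetes/kubelet/kubelet-config.json
```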
Ignoring this could potentially lead to running pods with undesired kernel behavior.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n Run the following command on each node to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that the command line for kubelet includes this argument set to `true`:\n\n \n```\n--protect-kernel-defaults=true\n\n```\n If the `--protect-kernel-defaults` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `protectKernelDefaults` to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\"` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "You would have to re-tune kernel parameters to match kubelet parameters.", - "default_value": "See the EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0178", - "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "description": "Allow Kubelet to manage iptables.", - "long_description": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. 
It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the above command includes the argument `--make-iptables-util-chains` then verify it is set to true.\n\n If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `authentication... 
\"makeIPTablesUtilChains.:true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0179", - "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", - "description": "Do not override node hostnames.", - "long_description": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs. Usage of --hostname-override also may have some undefined/unsupported behaviours.", - "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n Verify that `--hostname-override` argument does not exist in the output of the above command.\n\n **Note** This setting is not configurable via the Kubelet config file.", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/issues/22063", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "--hostname-override may not take when the kubelet also has --cloud-provider aws", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0180", - "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", - "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "long_description": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node.\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n In the output of the above command review the value set for the `--eventRecordQPS` argument and determine whether this has been set to an appropriate level for the cluster. 
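For CIS-3.2.9, the configz endpoint can be queried for just this field; a sketch, assuming `kubectl proxy` is running on port 8001 as in the other kubelet audits and the node name is a placeholder:

```
export NODE_NAME=my-node-name
curl -sSL "http://localhost:8001/api/v1/nodes/${NODE_NAME}/proxy/configz" \
  | jq '.kubeletconfig.eventRecordQPS'
```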
The value of `0` can be used to ensure that all events are captured.\n\n If the `--eventRecordQPS` argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.\nThe output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n If there is an entry for `eventRecordQPS` check that it is set to 0 or an appropriate level for the cluster.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `eventRecordQPS` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://kubernetes.io/docs/admin/kubelet/", - "https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 2, - "impact_statement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0181", - "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not present or is set to true", - "description": "Enable kubelet client certificate rotation.", - "long_description": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that the there is no downtime due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.\n\n **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", - "manual_test": "**Audit Method 1:**\n\n SSH to each node and run the following command to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--RotateCertificate` executable argument, verify that it is set to true.\nIf the output of the command above does not include the `--RotateCertificate` executable argument then check the Kubelet config file. The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that the `RotateCertificate` argument is not present, or is set to `true`.", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/41912", - "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration", - "https://kubernetes.io/docs/imported/release/notes/", - "https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", - "https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0183", - "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", - "description": "Enable kubelet server certificate rotation.", - "long_description": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that the there are no downtimes due to expired certificates and thus addressing availability in the CIA (Confidentiality, Integrity, and Availability) security triad.\n\n Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. 
Vault) then you need to implement rotation yourself.", - "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", - "manual_test": "**Audit Method 1:**\n\n First, SSH to each node:\n\n Run the following command on each node to find the Kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n If the output of the command above includes the `--rotate-kubelet-server-certificate` executable argument verify that it is set to true.\n\n If the process does not have the `--rotate-kubelet-server-certificate` executable argument then check the Kubelet config file. 
The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Open the Kubelet config file:\n\n \n```\ncat /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n Verify that `RotateKubeletServerCertificate` argument exists in the `featureGates` section and is set to `true`.\n\n **Audit Method 2:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":true` by extracting the live configuration from the nodes running kubelet.\n\n Set the local proxy port and the following variables and provide proxy port number and node name;\n`HOSTNAME_PORT=\"localhost-and-port-number\"` `NODE_NAME=\"The-Name-Of-Node-To-Extract-Configuration\" from the output of \"kubectl get nodes\"`\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```", - "references": [ - "https://github.com/kubernetes/kubernetes/pull/45059", - "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None", - "default_value": "See the Amazon EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", - "controlID": "C-0185", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "long_description": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
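As an illustrative sketch only (not part of this control's rules), the lower-privileged alternative that C-0185's remediation suggests is often a namespaced RoleBinding to the built-in `admin` ClusterRole rather than a ClusterRoleBinding to `cluster-admin`; the namespace, binding name and subject below are hypothetical:

```
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: team-a-admin          # hypothetical binding name
  namespace: team-a           # hypothetical namespace
subjects:
- kind: User
  name: jane@example.com      # hypothetical user
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: admin                 # built-in ClusterRole scoped to one namespace via RoleBinding
  apiGroup: rbac.authorization.k8s.io
```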
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "manual_test": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role.\n\n kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[\\*].name\n\n Review each principal listed and ensure that `cluster-admin` privilege is required for it.", - "test": "Check which subjects have are bound to the cluster-admin role with a clusterrolebinding.", - "references": [ - "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", - "default_value": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.2 Minimize access to secrets", - "controlID": "C-0186", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "long_description": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "manual_test": "Review the users who have `get`, `list` or `watch` access to `secrets` objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to get, list or watch Kubernetes secrets.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to secrets to system components which require this for their operation", - "default_value": "By default, the following list of principals have `get` privileges on `secret` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:expand-controller expand-controller ServiceAccount kube-system\nsystem:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system\nsystem:controller:namespace-controller namespace-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:kube-controller-manager system:kube-controller-manager User \n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.3 
Minimize wildcard use in Roles and ClusterRoles", - "controlID": "C-0187", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "long_description": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "manual_test": "Retrieve the roles defined across each namespaces in the cluster and review for wildcards\n\n \n```\nkubectl get roles --all-namespaces -o yaml\n\n```\n Retrieve the cluster roles defined in the cluster and review for wildcards\n\n \n```\nkubectl get clusterroles -o yaml\n\n```", - "test": "Check which subjects have wildcard RBAC permissions.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.4 Minimize access to create pods", - "controlID": "C-0188", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "long_description": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "manual_test": "Review the users who have create access to pod objects in the Kubernetes API.", - "test": "Check which subjects have RBAC permissions to create pods.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Care should be taken not to remove access to pods to system components which require this for their operation", - "default_value": "By default, the following list of principals have `create` privileges on `pod` objects\n\n \n```\nCLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE\ncluster-admin system:masters Group \nsystem:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system\nsystem:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system\nsystem:controller:job-controller job-controller ServiceAccount kube-system\nsystem:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system\nsystem:controller:replicaset-controller replicaset-controller ServiceAccount kube-system\nsystem:controller:replication-controller replication-controller ServiceAccount kube-system\nsystem:controller:statefulset-controller statefulset-controller ServiceAccount kube-system\n\n```", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - 
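For illustration only, a Role written without wildcards, as C-0187 above recommends, spells out explicit API groups, resources and verbs; the role name and namespace below are hypothetical:

```
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: config-reader         # hypothetical role name
  namespace: team-a           # hypothetical namespace
rules:
- apiGroups: [""]             # core API group spelled out, not "*"
  resources: ["configmaps"]   # explicit resource instead of "*"
  verbs: ["get", "list", "watch"]
```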
}, - { - "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", - "controlID": "C-0189", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "long_description": "Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod.\n\n Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.\n\n The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", - "test": "Checks that each namespace has at least one service account that isn't the default, and checks that the default service accounts have 'automountServiceAccountToken: false' set", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - "https://aws.github.io/aws-eks-best-practices/iam/#disable-auto-mounting-of-service-account-tokens" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", - "default_value": "By default the `default` service account allows for its service account token to be mounted in pods in its namespace.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "controlID": "C-0190", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "long_description": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster.\n\n Avoiding mounting these tokens removes this attack avenue.", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", - "manual_test": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access.\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "test": "Check that all service accounts and workloads disable automount of service account tokens.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "Pods mounted without service 
account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", - "default_value": "By default, all pods get a service account token mounted in them.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "controlID": "C-0191", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "long_description": "The impersonate privilege allows a subject to impersonate other users gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level.\n\n Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "manual_test": "Review the users who have access to cluster roles or roles which provide the impersonate, bind or escalate privileges.", - "references": [ - "https://www.impidio.com/blog/kubernetes-rbac-security-pitfalls", - "https://raesene.github.io/blog/2020/12/12/Escalating_Away/", - "https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", - "default_value": "In a default kubeadm cluster, the system:masters group and clusterrole-aggregation-controller service account have access to the escalate privilege. The system:masters group also has access to bind and impersonate.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0205", - "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "long_description": "Kubernetes network policies are enforced by the CNI plugin in use. As such it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", - "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. 
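A minimal sketch of the deny-all policy that this remediation describes (the namespace name is hypothetical, and Calico's global-policy equivalent is not shown):

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: team-a           # hypothetical namespace
spec:
  podSelector: {}             # selects every pod in the namespace
  policyTypes:
  - Ingress                   # no ingress rules listed => all inbound traffic denied
  - Egress                    # no egress rules listed => all outbound traffic denied
```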
Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", - "manual_test": "Review the documentation of CNI plugin in use by the cluster, and confirm that it supports network policies.", - "references": [ - "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", - "https://aws.github.io/aws-eks-best-practices/network/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None.", - "default_value": "This will depend on the CNI plugin in use.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", - "controlID": "C-0206", - "description": "Use network policies to isolate traffic in your cluster network.", - "long_description": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints.\n\n Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "manual_test": "Run the below command and review the `NetworkPolicy` objects created in the cluster.\n\n \n```\nkubectl get networkpolicy --all-namespaces\n\n```\n Ensure that each namespace defined in the cluster has at least one Network Policy.", - "test": "Check for each namespace if there is a network policy defined.", - "references": [ - "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/", - "https://octetz.com/posts/k8s-network-policy-apis", - "https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic\"", - "default_value": "By default, network policies are not created.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", - "controlID": "C-0207", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "long_description": "It is reasonably common for application code to log out its environment (particularly in the event of an error). 
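As a sketch of the file-based approach that C-0207 recommends, a secret can be mounted as a read-only volume instead of being injected through environment variables; all names and the image below are hypothetical:

```
apiVersion: v1
kind: Pod
metadata:
  name: app-with-secret-file            # hypothetical pod name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0 # hypothetical image
    volumeMounts:
    - name: db-credentials
      mountPath: /etc/secrets           # application reads files here instead of env vars
      readOnly: true
  volumes:
  - name: db-credentials
    secret:
      secretName: db-credentials        # hypothetical Secret name
```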
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "manual_test": "Run the following command to find references to objects which use environment variables defined from secrets.\n\n \n```\nkubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {\"\\n\"}{end}' -A\n\n```", - "test": "Check if pods have secrets in their environment variables", - "references": [ - "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "Application code which expects to read secrets in the form of environment variables would need modification", - "default_value": "By default, secrets are not defined", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.6.1 Create administrative boundaries between resources using namespaces", - "controlID": "C-0209", - "description": "Use namespaces to isolate your Kubernetes objects.", - "long_description": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in an Amazon EKS cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "manual_test": "Run the below command and review the namespaces created in the cluster.\n\n \n```\nkubectl get namespaces\n\n```\n Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", - "test": "Lists all namespaces in cluster for user to review", - "references": [ - "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "You need to switch between namespaces for administration.", - "default_value": "By default, Kubernetes starts with two initial namespaces:\n\n 1. `default` - The default namespace for objects with no other namespace\n2. `kube-system` - The namespace for objects created by the Kubernetes system\n3. `kube-public` - The namespace for public-readable ConfigMap\n4. `kube-node-lease` - The namespace for associated lease object for each node", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.6.2 Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. 
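For illustration only, pod-level and container-level security contexts of the kind C-0211 describes might look like the sketch below; the exact settings should follow the CIS Docker Benchmark referenced by the control, and the names, image and UIDs here are hypothetical:

```
apiVersion: v1
kind: Pod
metadata:
  name: hardened-app                    # hypothetical pod name
spec:
  securityContext:                      # pod-level security context
    runAsNonRoot: true
    runAsUser: 10001                    # hypothetical non-root UID
    fsGroup: 10001
  containers:
  - name: app
    image: registry.example.com/app:1.0 # hypothetical image
    securityContext:                    # container-level security context
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["ALL"]
```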
It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields are set according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "name": "CIS-4.6.3 The default namespace should not be used", - "controlID": "C-0212", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "long_description": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "manual_test": "Run this command to list objects in default namespace\n\n \n```\nkubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default\n\n```\n The only entries there should be system managed resources such as the `kubernetes` service", - "test": "Lists all resources in default namespace for user to review and approve.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 4, - "impact_statement": "None", - "default_value": "Unless a namespace is specified on object creation, the `default` namespace will be used", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0213", - "name": "CIS-4.2.1 Minimize the admission of privileged containers", - "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", - "long_description": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do.
This flag exists to allow special use-cases, like manipulating the network stack and accessing devices.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit privileged containers.\n\n If you need to run privileged containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether privileged is enabled:\n\n \n```\nkubectl get psp -o json\n\n```\n Verify that there is at least one PSP which does not return `true`.\n\n `kubectl get psp -o=jsonpath='{.spec.privileged}'`", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://aws.github.io/aws-eks-best-practices/pods/#restrict-the-containers-that-can-run-as-privileged" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "Pods defined with `spec.containers[].securityContext.privileged: true` will not be permitted.", - "default_value": "By default, when you provision an EKS cluster, a pod security policy called `eks.privileged` is automatically created. The manifest for that policy appears below:\n\n \n```\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n annotations:\n kubernetes.io/description: privileged allows full unrestricted access to pod features,\n as if the PodSecurityPolicy controller was not enabled.\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'\n labels:\n eks.amazonaws.com/component: pod-security-policy\n kubernetes.io/cluster-service: \"true\"\n name: eks.privileged\nspec:\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n hostIPC: true\n hostNetwork: true\n hostPID: true\n hostPorts:\n - max: 65535\n min: 0\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n\n```", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0214", - "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "long_description": "A container running in the host's PID namespace can inspect processes running outside the container. 
If the container also has access to ptrace capabilities this can be used to escalate privileges outside of the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host PID namespace.\n\n If you need to run containers which require hostPID, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostPID is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostPID}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0215", - "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "long_description": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host IPC namespace.\n\n If you have a requirement to run containers which require hostIPC, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostIPC is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostIPC}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0216", - "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "long_description": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to share the host network namespace.\n\n If you have need
to run containers which require hostNetwork, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether hostNetwork is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.hostNetwork}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0217", - "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", - "long_description": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit containers to allow privilege escalation. The option exists (and is defaulted to true) to permit setuid binaries to run.\n\n If you have need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether allowPrivilegeEscalation is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.allowPrivilegeEscalation}'\n\n```\n Verify that there is at least one PSP which does not return true.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods defined with `spec.allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific PSP.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0218", - "name": "CIS-4.2.6 Minimize the admission of root containers", - "description": "Do not generally permit containers to be run as the root user.", - "long_description": "Containers may run as any Linux user.
Containers which run as the root user, whilst constrained by Container Runtime security features still have an escalated likelihood of container breakout.\n\n Ideally, all containers should run as a defined non-UID 0 user.\n\n There should be at least one PodSecurityPolicy (PSP) defined which does not permit root users in a container.\n\n If you need to run root containers, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether running containers as root is enabled:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.runAsUser.rule}'\n\n```\n Verify that there is at least one PSP which returns `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Pods with containers which run as the root user will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0219", - "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers which could expose them to risks of container breakout attacks.\n\n There should be at least one PodSecurityPolicy (PSP) defined which prevents containers with capabilities beyond the default set from launching.\n\n If you need to run containers with additional capabilities, this should be defined in a separate PSP and you should carefully check RBAC controls to ensure that only limited service accounts and users are given permission to access that PSP.", - "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n Verify that there are no PSPs present which have `allowedCapabilities` set to anything other than an empty array.", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities outwith the default set will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.
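As a sketch only (not a definitive policy, and noting that PSPs are deprecated in favour of Pod Security admission in newer Kubernetes releases), a single restrictive PodSecurityPolicy covering the checks in C-0213 through C-0220 could look roughly like this; the policy name and volume list are hypothetical choices:

```
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted                  # hypothetical policy name
spec:
  privileged: false                 # C-0213
  hostPID: false                    # C-0214
  hostIPC: false                    # C-0215
  hostNetwork: false                # C-0216
  allowPrivilegeEscalation: false   # C-0217
  runAsUser:
    rule: MustRunAsNonRoot          # C-0218
  requiredDropCapabilities: ["ALL"] # C-0219 / C-0220
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:                          # no hostPath or wildcard volumes
  - configMap
  - secret
  - emptyDir
  - persistentVolumeClaim
```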
If a PSP is created 'allowedCapabilities' is set by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0220", - "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", - "description": "Do not generally permit containers with capabilities", - "long_description": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities are parts of the rights generally granted on a Linux system to the root user.\n\n In many cases applications running in containers do not require any capabilities to operate, so from the perspective of the principle of least privilege, use of capabilities should be minimized.", - "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", - "manual_test": "Get the set of PSPs with the following command:\n\n \n```\nkubectl get psp\n\n```\n For each PSP, check whether capabilities have been forbidden:\n\n \n```\nkubectl get psp -o=jsonpath='{.spec.requiredDropCapabilities}'\n\n```", - "references": [ - "https://kubernetes.io/docs/concepts/policy/pod-security-policy/#enabling-pod-security-policies", - "https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "Pods with containers which require capabilities to operate will not be permitted.", - "default_value": "By default, PodSecurityPolicies are not defined.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0221", - "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", - "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", - "long_description": "Vulnerabilities in software packages can be exploited by hackers or malicious users to obtain unauthorized access to local cloud resources. Amazon ECR and other third party products allow images to be scanned for known vulnerabilities.", - "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console:\n\n 1. Open the Amazon ECR console at.\n2. From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. 
On the Images page, select the image to scan and then choose Scan.", - "manual_test": "Please follow AWS ECR or your 3rd party image scanning provider's guidelines for enabling Image Scanning.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "If you are utilizing AWS ECR, the following are common image scan failures. You can view errors like this in the Amazon ECR console by displaying the image details or through the API or AWS CLI by using the DescribeImageScanFindings API. UnsupportedImageError: You may get an UnsupportedImageError error when attempting to scan an image that was built using an operating system that Amazon ECR doesn't support image scanning for. Amazon ECR supports package vulnerability scanning for major versions of Amazon Linux, Amazon Linux 2, Debian, Ubuntu, CentOS, Oracle Linux, Alpine, and RHEL Linux distributions. Amazon ECR does not support scanning images built from the Docker scratch image. An UNDEFINED severity level is returned: You may receive a scan finding that has a severity level of UNDEFINED. The following are the common causes for this: The vulnerability was not assigned a priority by the CVE source. The vulnerability was assigned a priority that Amazon ECR did not recognize. To determine the severity and description of a vulnerability, you can view the CVE directly from the source.", - "default_value": "Images are not scanned by default.", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "controlID": "C-0222", - "name": "CIS-5.1.2 Minimize user access to Amazon ECR", - "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", - "long_description": "Weak access control to Amazon ECR may allow malicious users to replace built images with vulnerable containers.", - "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. 
Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#scanning-repository" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "Care should be taken not to remove access to Amazon ECR for accounts that require this for their operation.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0223", - "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", - "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", - "long_description": "The Cluster Service Account does not require administrative access to Amazon ECR, only requiring pull access to containers to deploy onto Amazon EKS. Restricting permissions follows the principles of least privilege and prevents credentials from being abused beyond the required role.", - "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", - "manual_test": "Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\n\n If utilizing a 3rd party tool to scan images utilize the minimum required permission level required to interact with the cluster - generally this should be read-only.", - "references": [ - "https://docs.aws.amazon.com/AmazonECR/latest/userguide/ECR_on_EKS.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "A separate dedicated service account may be required for use by build servers and other robot users pushing or managing container images.", - "default_value": "If you used eksctl or the AWS CloudFormation templates in Getting Started with Amazon EKS to create your cluster and worker node groups, these IAM permissions are applied to your worker node IAM role by default.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0225", - "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", - "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. 
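A minimal sketch of the dedicated, IAM-backed service account that C-0225 prefers, using the documented `eks.amazonaws.com/role-arn` annotation for IAM Roles for Service Accounts (IRSA); the account ID, role and object names below are hypothetical:

```
apiVersion: v1
kind: ServiceAccount
metadata:
  name: s3-reader             # hypothetical service account
  namespace: team-a           # hypothetical namespace
  annotations:
    # hypothetical role ARN; the annotation key is the documented IRSA mechanism
    eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/team-a-s3-reader
```

Pods that reference this service account receive credentials scoped to that IAM role only, rather than the node instance role.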
Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", - "long_description": "Manual approaches for authenticating Kubernetes workloads running on Amazon EKS against AWS APIs are: storing service account keys as a Kubernetes secret (which introduces manual key rotation and potential for key compromise); or use of the underlying nodes' IAM Service account, which violates the principle of least privilege on a multi-tenanted node, when one pod needs to have access to a service, but every other pod on the node that uses the Service account does not.", - "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance\u2019s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod\u2019s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege \u2014 By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation \u2014 A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability \u2014 Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see list text hereEnabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", - "manual_test": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults.\n\n Additionally ensure that the automountServiceAccountToken: false setting is in place for each default service account.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html", - "https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-cni-walkthrough.html", - "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#scope-the-iam-role-trust-policy-for-irsa-to-the-service-account-name" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0226", - "name": "CIS-3.3.1 Prefer using a container-optimized OS when possible", - "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", - "long_description": "Container-optimized OSes have a smaller footprint which will reduce the instance's potential attack surface. The container runtime is pre-installed and security settings like locked-down firewall is configured by default. Container-optimized images may also be configured to automatically update on a regular period in the background.", - "remediation": "", - "manual_test": "If a container-optimized OS is required examine the nodes in EC2 and click on their AMI to ensure that it is a container-optimized OS like Amazon Bottlerocket; or connect to the worker node and check its OS.", - "references": [ - "https://aws.amazon.com/blogs/containers/bottlerocket-a-special-purpose-container-operating-system/", - "https://aws.amazon.com/bottlerocket/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "A container-optimized OS may have limited or no support for package managers, execution of non-containerized applications, or ability to install third-party drivers or kernel modules. Conventional remote access to the host (i.e. 
ssh) may not be possible, with access and debugging being intended via a management tool.", - "default_value": "A container-optimized OS is not the default.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0227", - "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", - "long_description": "Authorized networks are a way of specifying a restricted range of IP addresses that are permitted to access your cluster's control plane. Kubernetes Engine uses both Transport Layer Security (TLS) and authentication to provide secure access to your cluster's control plane from the public internet. This provides you the flexibility to administer your cluster from anywhere; however, you might want to further restrict access to a set of IP addresses that you control. You can set this restriction by specifying an authorized network.\n\n Restricting access to an authorized network can provide additional security benefits for your container cluster, including:\n\n * Better protection from outsider attacks: Authorized networks provide an additional layer of security by limiting external access to a specific set of addresses you designate, such as those that originate from your premises. This helps protect access to your cluster in the case of a vulnerability in the cluster's authentication or authorization mechanism.\n* Better protection from insider attacks: Authorized networks help protect your cluster from accidental leaks of master certificates from your company's premises. Leaked certificates used from outside Cloud Services and outside the authorized IP ranges (for example, from addresses outside your company) are still denied access.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
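Illustrative only, not part of the original control text: if I understand the EKS `UpdateClusterConfig` API correctly, the same endpoint restriction shown in the C-0227 remediation can also be expressed as a JSON request body and passed with `aws eks update-cluster-config --cli-input-json file://endpoint-access.json`; the cluster name and CIDR block below are placeholders.

```
{
  "name": "my-cluster",
  "resourcesVpcConfig": {
    "endpointPrivateAccess": true,
    "endpointPublicAccess": true,
    "publicAccessCidrs": ["203.0.113.5/32"]
  }
}
```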
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "When implementing Endpoint Private Access, be careful to ensure all desired networks are on the allowlist (whitelist) to prevent inadvertently blocking external access to your cluster's control plane.", - "default_value": "By default, Endpoint Public Access is disabled.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0228", - "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "long_description": "In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's VPC network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's VPC network.\n\n Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's VPC network to perform any attack on the Kubernetes API.", - "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", - "manual_test": "Check for private endpoint access to the Kubernetes API server", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "Configure the EKS cluster endpoint to be private.\n\n 1. Leave the cluster endpoint public and specify which CIDR blocks can communicate with the cluster endpoint. The blocks are effectively a whitelisted set of public IP addresses that are allowed to access the cluster endpoint.\n2. Configure public access with a set of whitelisted CIDR blocks and set private endpoint access to enabled. 
This will allow public access from a specific range of public IPs while forcing all network traffic between the kubelets (workers) and the Kubernetes API through the cross-account ENIs that get provisioned into the cluster VPC when the control plane is provisioned.", - "default_value": "By default, the Public Endpoint is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0229", - "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "long_description": "Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.", - "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8.0, - "impact_statement": "To enable Private Nodes, the cluster has to also be configured with a private master IP range and IP Aliasing enabled.\n\n Private Nodes do not have outbound access to the public internet. If you want to provide outbound Internet access for your private nodes, you can use Cloud NAT or you can manage your own NAT gateway.", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0230", - "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", - "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.", - "long_description": "By default, all pod to pod traffic within a cluster is allowed. Network Policy creates a pod-level firewall that can be used to restrict traffic between sources. Pod traffic is restricted by having a Network Policy that selects it (through the use of labels). Once there is any Network Policy in a namespace selecting a particular pod, that pod will reject any connections that are not allowed by any Network Policy. Other pods in the namespace that are not selected by any Network Policy will continue to accept all traffic.\n\n Network Policies are managed via the Kubernetes Network Policy API and enforced by a network plugin, simply creating the resource without a compatible network plugin to implement it will have no effect.", - "remediation": "", - "manual_test": "", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy.\n\n Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. 
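As a hedged illustration of the Network Policy behaviour described in C-0230 (my own sketch, not part of the original file): once any policy selects a pod, only explicitly allowed traffic is accepted. A minimal default-deny-ingress policy, with placeholder metadata, could look like this in JSON manifest form; the empty `podSelector` selects every pod in the namespace.

```
{
  "apiVersion": "networking.k8s.io/v1",
  "kind": "NetworkPolicy",
  "metadata": {
    "name": "default-deny-ingress",
    "namespace": "my-namespace"
  },
  "spec": {
    "podSelector": {},
    "policyTypes": ["Ingress"]
  }
}
```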
This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion.\n\n Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.", - "default_value": "By default, Network Policy is disabled.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0231", - "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "long_description": "Encrypting traffic between users and your Kubernetes workload is fundamental to protecting data sent over the web.", - "remediation": "", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/data-protection.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5.0, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0232", - "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", - "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", - "long_description": "On- and off-boarding users is often difficult to automate and prone to error. Using a single source of truth for user permissions reduces the number of locations that an individual must be off-boarded from, and prevents users gaining unique permissions sets that increase the cost of audit.", - "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", - "manual_test": "To Audit access to the namespace $NAMESPACE, assume the IAM role yourIAMRoleName for a user that you created, and then run the following command:\n\n \n```\n$ kubectl get role -n $NAMESPACE\n\n```\n The response lists the RBAC role that has access to this Namespace.", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html", - "https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 7, - "impact_statement": "Users must now be assigned to the IAM group created to use this namespace and deploy applications. 
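A possible companion sketch for C-0232 (assumed, not taken from the original controls): once an IAM role or user is mapped to a Kubernetes group (here the hypothetical group `eks-dev-team`), namespace access is granted through ordinary RBAC, which is what the manual test above audits. The binding uses the built-in `view` ClusterRole; all names are placeholders.

```
{
  "apiVersion": "rbac.authorization.k8s.io/v1",
  "kind": "RoleBinding",
  "metadata": {
    "name": "dev-team-read",
    "namespace": "my-namespace"
  },
  "subjects": [
    {
      "kind": "Group",
      "name": "eks-dev-team",
      "apiGroup": "rbac.authorization.k8s.io"
    }
  ],
  "roleRef": {
    "kind": "ClusterRole",
    "name": "view",
    "apiGroup": "rbac.authorization.k8s.io"
  }
}
```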
If they are not they will not be able to access the namespace or deploy.", - "default_value": "For role-based access control (RBAC), system:masters permissions are configured in the Amazon EKS control plane", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0233", - "name": "CIS-5.6.1 Consider Fargate for running untrusted workloads", - "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", - "long_description": "", - "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * list text hereFor Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. 
On the Review and create page, review the information for your Fargate profile and choose Create.", - "manual_test": "", - "references": [ - "https://docs.aws.amazon.com/eks/latest/userguide/fargate.html" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 3, - "impact_statement": "", - "default_value": "By default, AWS Fargate is not utilized.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0234", - "name": "CIS-4.4.2 Consider external secret storage", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "long_description": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrests are used across both Kubernetes and non-Kubernetes environments.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "manual_test": "Review your secrets management implementation.", - "references": [], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "None", - "default_value": "By default, no external secret management is configured.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - }, - { - "controlID": "C-0235", - "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", - "long_description": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", - "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", - "manual_test": "First, SSH to the relevant worker node:\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate Kubelet config file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--config /etc/kubernetes/kubelet/kubelet-config.json` which is the location of the Kubelet config file.\n\n Run the following command:\n\n \n```\nstat -c %a /etc/kubernetes/kubelet/kubelet-config.json\n\n```\n The output of the above command is the Kubelet config file's permissions. 
Verify that the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6.0, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0238", - "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", - "long_description": "The `kubelet` kubeconfig file controls various parameters of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.\n\n It is possible to run `kubelet` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", - "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", - "manual_test": "SSH to the worker nodes\n\n To check to see if the Kubelet Service is running:\n\n \n```\nsudo systemctl status kubelet\n\n```\n The output should return `Active: active (running) since..`\n\n Run the following command on each node to find the appropriate kubeconfig file:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command should return something similar to `--kubeconfig /var/lib/kubelet/kubeconfig` which is the location of the kubeconfig file.\n\n Run this command to obtain the kubeconfig file permissions:\n\n \n```\nstat -c %a /var/lib/kubelet/kubeconfig\n\n```\n The output of the above command gives you the kubeconfig file's permissions.\n\n Verify that if a file is specified and it exists, the permissions are `644` or more restrictive.", - "references": [ - "https://kubernetes.io/docs/admin/kube-proxy/" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 6, - "impact_statement": "None.", - "default_value": "See the AWS EKS documentation for the default value.", - "scanningScope": { - "matches": [ - "EKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0242", - "name": "CIS-5.6.2 Hostile multi-tenant workloads", - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. 
For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", - "long_description": "", - "remediation": "", - "manual_test": "", - "references": [ - "" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "impact_statement": "", - "default_value": "", - "scanningScope": { - "matches": [ - "AKS" - ] - }, - "rules": [] - }, - { - "controlID": "C-0246", - "name": "CIS-4.1.7 Avoid use of system:masters group", - "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", - "long_description": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it, are removed.\n\n When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", - "remediation": "Remove the `system:masters` group from all users in the cluster.", - "manual_test": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", - "references": [ - "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "Once the RBAC system is operational in a cluster `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", - "default_value": "By default some clusters will create a \"break glass\" client certificate which is a member of this group. 
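To illustrate the impact statement of C-0246 (my own sketch, with placeholder subject and binding names): instead of relying on `system:masters`, an ordinary binding to the built-in `cluster-admin` ClusterRole grants unrestricted access and, unlike certificate-based group membership, can be revoked by deleting the binding.

```
{
  "apiVersion": "rbac.authorization.k8s.io/v1",
  "kind": "ClusterRoleBinding",
  "metadata": {
    "name": "break-glass-admin"
  },
  "subjects": [
    {
      "kind": "User",
      "name": "emergency-admin",
      "apiGroup": "rbac.authorization.k8s.io"
    }
  ],
  "roleRef": {
    "kind": "ClusterRole",
    "name": "cluster-admin",
    "apiGroup": "rbac.authorization.k8s.io"
  }
}
```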
Access to this client certificate should be carefully controlled and it should not be used for general cluster operations.", - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0066", - "C-0067", - "C-0078", - "C-0167", - "C-0171", - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0183", - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191", - "C-0205", - "C-0206", - "C-0207", - "C-0209", - "C-0211", - "C-0212", - "C-0213", - "C-0214", - "C-0215", - "C-0216", - "C-0217", - "C-0218", - "C-0219", - "C-0220", - "C-0221", - "C-0222", - "C-0223", - "C-0225", - "C-0226", - "C-0227", - "C-0228", - "C-0229", - "C-0230", - "C-0231", - "C-0232", - "C-0233", - "C-0234", - "C-0235", - "C-0238", - "C-0242", - "C-0246" - ] - }, - { - "name": "DevOpsBest", - "description": "", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Resources memory limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ], - "actionRequired": "configuration" - }, - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0004", - "example": "@controls/examples/c004.yaml", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "long_description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "controlID": "C-0018", - "example": "@controls/examples/c018.yaml", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. 
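A minimal sketch (not taken from the referenced example files) of a workload that would satisfy both C-0004 (memory request and limit set) and C-0018 (readiness probe configured); the pod name, image and port are placeholders.

```
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "example-web"
  },
  "spec": {
    "containers": [
      {
        "name": "web",
        "image": "example.com/web:1.2.3",
        "resources": {
          "requests": { "memory": "128Mi" },
          "limits": { "memory": "256Mi" }
        },
        "readinessProbe": {
          "httpGet": { "path": "/healthz", "port": 8080 },
          "initialDelaySeconds": 5,
          "periodSeconds": 10
        }
      }
    ]
  }
}
```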
Use NodePort / ClusterIP instead.", - "long_description": "This control identifies workloads (like pod, deployment, etc.) that contain a container with hostPort. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with containers) whether hostPort is set on any of its containers.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Resources CPU limit and request", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", - "controlID": "C-0050", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Configured liveness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "long_description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "controlID": "C-0056", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "description": "It is recommended to avoid running PODs in the cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create the necessary namespaces and move all the PODs from the default namespace there.", - "long_description": "It is recommended to avoid running PODs in the cluster without explicit namespace assignment. This may lead to wrong capabilities and permissions assignment and potential compromises. 
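Similarly, a hedged sketch of a pod that addresses C-0050 (CPU request and limit), C-0056 (liveness probe) and C-0061 (explicit, non-default namespace); all names and values below are placeholders.

```
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "example-worker",
    "namespace": "team-namespace"
  },
  "spec": {
    "containers": [
      {
        "name": "worker",
        "image": "example.com/worker:1.0.0",
        "resources": {
          "requests": { "cpu": "250m" },
          "limits": { "cpu": "500m" }
        },
        "livenessProbe": {
          "httpGet": { "path": "/livez", "port": 8080 },
          "initialDelaySeconds": 10,
          "periodSeconds": 15
        }
      }
    ]
  }
}
```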
This control identifies all the PODs running in the default namespace.", - "test": "Check that there are no pods in the 'default' namespace", - "controlID": "C-0061", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "remediation": "Create the necessary Deployment object for every POD, making every POD a first-class citizen in your IaC architecture.", - "long_description": "It is not recommended to create PODs without a parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drift and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "test": "Test if PODs are not associated with a Deployment, ReplicaSet, etc. If not, fail.", - "controlID": "C-0073", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Containers mounting Docker socket", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if a Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", - "remediation": "Remove the Docker socket mount request or define an exception.", - "long_description": "Mounting the Docker socket (Unix socket) enables a container to access Docker internals, retrieve sensitive information and execute Docker commands, if a Docker runtime is available. This control identifies PODs that attempt to mount the Docker socket for accessing the Docker runtime.", - "test": "Check hostPath. If the path is set to /var/run/docker.sock or /var/lib/docker, the container has access to Docker internals - fail.", - "controlID": "C-0074", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Image pull policy on latest tag", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. 
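As an illustration of the C-0073 remediation (wrapping every pod in a parent controller so it is rescheduled after a crash or node failure), a minimal Deployment with placeholder names and image might look like the following.

```
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "name": "example-web",
    "namespace": "team-namespace"
  },
  "spec": {
    "replicas": 2,
    "selector": { "matchLabels": { "app": "example-web" } },
    "template": {
      "metadata": { "labels": { "app": "example-web" } },
      "spec": {
        "containers": [
          { "name": "web", "image": "example.com/web:1.2.3" }
        ]
      }
    }
  }
}
```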
This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "long_description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with the latest tag that have ImagePullPolicy not set to Always. Note as well that some vendors don't use the word latest in the tag. Some other word may also behave like the latest. For example, Redis uses redis:alpine to signify the latest. Therefore, this control treats any tag that does not contain digits as the latest. If no tag is specified, the image is treated as latest too.", - "test": "If imagePullPolicy = Always, pass; else fail.", - "controlID": "C-0075", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Label usage for resources", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs, or use the exceptions to prevent further notifications.", - "long_description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "test": "Test will check if a certain set of labels is defined; this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "controlID": "C-0076", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "K8s common labels usage", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "description": "Kubernetes common labels help manage and monitor a Kubernetes cluster using different tools such as kubectl, the dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "long_description": "Kubernetes common labels help manage and monitor a Kubernetes cluster using different tools such as kubectl, the dashboard and others in an interoperable way. 
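A combined, illustrative manifest (placeholder names, labels and image; the exact label sets for C-0076 and C-0077 are configurable) showing what these controls look for: `imagePullPolicy: Always` on a `latest`-tagged image, semantic labels such as app/tier/env, and the `app.kubernetes.io/` common labels.

```
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "example-cache",
    "namespace": "team-namespace",
    "labels": {
      "app": "example-cache",
      "tier": "backend",
      "env": "test",
      "app.kubernetes.io/name": "example-cache",
      "app.kubernetes.io/version": "latest"
    }
  },
  "spec": {
    "containers": [
      {
        "name": "cache",
        "image": "example.com/cache:latest",
        "imagePullPolicy": "Always"
      }
    ]
  }
}
```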
Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "test": "Test will check if the list of label that start with app.kubernetes.io/ are defined.", - "controlID": "C-0077", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Deprecated Kubernetes image registry", - "attributes": { - "armoBuiltin": true - }, - "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", - "long_description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). This is mandatory from 1.27", - "test": "Checking images in kube-system namespace, if the registry of the image is from the old registry we raise an alert.", - "controlID": "C-0253", - "baseScore": 5.0, - "example": "@controls/examples/c239.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0004", - "C-0018", - "C-0044", - "C-0050", - "C-0056", - "C-0061", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0253" - ] - } -] \ No newline at end of file diff --git a/releaseDev/mitre.json b/releaseDev/mitre.json deleted file mode 100644 index 0b5143995..000000000 --- a/releaseDev/mitre.json +++ /dev/null @@ -1,2616 +0,0 @@ -{ - "name": "MITRE", - "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
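For context on what the Rego rules below flag (my own example, not part of the original file): `kubectl exec` requires the `create` verb on the `pods/exec` subresource, so a Role like the following, together with any subject bound to it, would be reported by C-0002. The role and namespace names are placeholders.

```
{
  "apiVersion": "rbac.authorization.k8s.io/v1",
  "kind": "Role",
  "metadata": {
    "name": "debug-exec",
    "namespace": "my-namespace"
  },
  "rules": [
    {
      "apiGroups": [""],
      "resources": ["pods/exec"],
      "verbs": ["create"]
    }
  ]
}
```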
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" - }, - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Data Destruction", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "long_description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources.", - "test": "Check which subjects have delete/deletecollection RBAC permissions on workloads.", - "controlID": "C-0007", - "baseScore": 5, - "example": "@controls/examples/c007.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-excessive-delete-rights", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can can delete important resources\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n 
canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n" - }, - { - "name": "rule-excessive-delete-rights-v1", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - 
], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. 
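As an illustrative aside (not part of this control definition): the remediation above can be applied by referencing a Secret instead of a literal environment value. In the sketch below the Pod name, image, Secret name and key are hypothetical; an env entry that uses secretKeyRef (or configMapKeyRef) is skipped by the rule's is_not_reference check, so only literal values are reported.

apiVersion: v1
kind: Pod
metadata:
  name: app-with-secret-ref            # hypothetical workload name
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0   # hypothetical image
    env:
    - name: DB_PASSWORD                 # name matches a sensitive key phrase, but ...
      valueFrom:
        secretKeyRef:                   # ... a secretKeyRef reference is not flagged
          name: db-credentials          # hypothetical Secret
          key: password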
Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := 
sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - } - ] - }, - { - "name": "Access Kubernetes dashboard", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
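As a hypothetical illustration of what this control flags: a binding that grants the kubernetes-dashboard Role to a subject other than the dashboard ServiceAccount. The binding name and user below are invented for the sketch.

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: extra-dashboard-access         # hypothetical binding
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard           # dashboard role
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User                           # not the dashboard ServiceAccount -> flagged
  name: jane@example.com               # hypothetical user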
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the \u201cKubernetes Dashboard\u201d service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "long_description": "The Kubernetes dashboard is a web-based UI that is used for monitoring and managing the Kubernetes cluster. The dashboard allows users to perform actions in the cluster using its service account (Kubernetes-dashboard) with the permissions that are determined by the binding or cluster-binding for this service account. Attackers who gain access to a container in the cluster, can use its network access to the dashboard pod. Consequently, attackers may retrieve information about the various resources in the cluster using the dashboard\u2019s identity.", - "test": "Check who is associated with the dashboard service account or bound to dashboard role/clusterrole.", - "controlID": "C-0014", - "baseScore": 2.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-access-dashboard", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to 
dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - }, - { - "name": "rule-access-dashboard-subject-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" - }, - { - "name": "rule-access-dashboard-wl-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "List Kubernetes secrets", - "attributes": { - "armoBuiltin": true, - 
"microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", - "long_description": "A Kubernetes secret is an object that lets users store and manage sensitive information, such as passwords and connection strings in the cluster. Secrets can be consumed by reference in the pod configuration. Attackers who have permissions to retrieve the secrets from the API server (by using the pod service account, for example) can access sensitive information that might include credentials to various services.", - "test": "Alerting on users which have get/list/watch RBAC permissions on secrets. ", - "controlID": "C-0015", - "baseScore": 7.0, - "example": "@controls/examples/c015.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-list-get-secrets", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can list/get secrets\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 
10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in 
api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Mount service principal", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Refrain from using path mount to known cloud credentials folders or files .", - "long_description": "When the cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
For example, in AKS each node contains service principal credential.", - "test": "Check which workloads have volumes with potential access to known cloud credentials folders or files in node, like \u201c/etc/kubernetes/azure.json\u201d for Azure.", - "controlID": "C-0020", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-mount-potential-credentials-paths", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "relevantCloudProviders": [ - "EKS", - "GKE", - "AKS" - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe 
paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" - } - ] - }, - { - "name": "Exposed sensitive interfaces", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "long_description": "Exposing a sensitive interface to the internet poses a security risk. Some popular frameworks were not intended to be exposed to the internet, and therefore don\u2019t require authentication by default. Thus, exposing them to the internet allows unauthenticated access to a sensitive interface which might enable running code or deploying containers in the cluster by a malicious actor. Examples of such interfaces that were seen exploited include Apache NiFi, Kubeflow, Argo Workflows, Weave Scope, and the Kubernetes dashboard.", - "test": "Checking if a service of type nodeport/loadbalancer to one of the known exploited interfaces (Apache NiFi, Kubeflow, Argo Workflows, Weave Scope Kubernetes dashboard) exists. 
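A hypothetical example of the pattern this control looks for, assuming "kubeflow" appears in the configured sensitive interface names: a workload whose name contains that string, exposed through a NodePort Service whose selector matches the workload's labels. All names, labels and the image are invented.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubeflow-ui                    # name contains a configured sensitive interface
  labels:
    app: kubeflow-ui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kubeflow-ui                 # matches the Service selector below
  template:
    metadata:
      labels:
        app: kubeflow-ui
    spec:
      containers:
      - name: ui
        image: registry.example.com/kubeflow-ui:1.0   # hypothetical image
---
apiVersion: v1
kind: Service
metadata:
  name: kubeflow-ui
spec:
  type: NodePort                       # externally reachable -> flagged with the workload
  selector:
    app: kubeflow-ui
  ports:
  - port: 80
    targetPort: 8080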
Needs to add user config", - "controlID": "C-0021", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "exposed-sensitive-interfaces", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.servicesNames" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.servicesNames", - "name": "Service names", - "description": "Kubescape will look for the following services that exposes sensitive interfaces of common K8s projects/applications" - } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\tresult := wl_connectedto_service(wl, service)\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tresult := wl_connectedto_service(pod, service)\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod, service]\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n\tresult := 
wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n" - }, - { - "name": "exposed-sensitive-interfaces-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.sensitiveInterfaces" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveInterfaces", - "name": "Sensitive interfaces", - "description": "The following interfaces were seen exploited. Kubescape checks it they are externally exposed." 
- } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == 
wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" - } - ] - }, - { - "name": "Kubernetes CronJob", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "long_description": "Kubernetes Job is a controller that creates one or more pods and ensures that a specified number of them successfully terminate. Kubernetes Job can be used to run containers that perform finite tasks for batch jobs. Kubernetes CronJob is used to schedule Jobs. 
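Because the rule simply lists every CronJob for review, any CronJob, such as the hypothetical one sketched below, would be reported; the name, schedule and image are illustrative only.

apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report                 # hypothetical name
spec:
  schedule: "0 2 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: report
            image: registry.example.com/report:1.0   # hypothetical image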
Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a container in the cluster.", - "test": "We list all CronJobs that exist in cluster for the user to approve.", - "controlID": "C-0026", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "rule-deny-cronjobs", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", - "armoBuiltin": true - }, - "ruleLanguage": "rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if it's cronjob", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "Delete Kubernetes events", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } - ] - }, - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "long_description": "A Kubernetes event is a Kubernetes object that logs state changes and failures of the resources in the cluster. Example events are a container creation, an image pull, or a pod scheduling on a node. Kubernetes events can be very useful for identifying changes that occur in the cluster. 
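A hypothetical Role and RoleBinding that this control would flag: the bound subject gains delete/deletecollection on events in its namespace. All object names and the ServiceAccount are invented for the sketch.

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: event-cleaner                  # hypothetical role
  namespace: default
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["delete", "deletecollection"]   # verbs the rule alerts on
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: event-cleaner-binding          # hypothetical binding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: event-cleaner
subjects:
- kind: ServiceAccount
  name: cleanup-sa                     # hypothetical subject that would be reported
  namespace: default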
Therefore, attackers may want to delete these events (e.g., by using: \u201ckubectl delete events\u2013all\u201d) in an attempt to avoid detection of their activity in the cluster.", - "test": "List who has delete/deletecollection RBAC permissions on events.", - "controlID": "C-0031", - "baseScore": 4.0, - "example": "@controls/examples/c031.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-delete-k8s-events", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := 
sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}" - }, - { - "name": "rule-can-delete-k8s-events-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ 
groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in 
rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := 
[sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Malicious admission controller (validating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. 
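For reference, the kind of object this control (C-0036) enumerates is a ValidatingWebhookConfiguration. The following is only a minimal, entirely hypothetical sketch of one (every name and value is invented for illustration):

apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: example-validating-webhook      # hypothetical name
webhooks:
- name: validate.example.com            # hypothetical webhook name
  admissionReviewVersions: ["v1"]
  sideEffects: None
  clientConfig:
    service:
      name: example-webhook-svc         # hypothetical Service backing the webhook
      namespace: default
      path: /validate
  rules:
  - apiGroups: [""]
    apiVersions: ["v1"]
    operations: ["CREATE"]
    resources: ["pods"]                 # this webhook is sent every pod creation request

The remediation continues below.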
Use the exception mechanism to prevent repetitive notifications.", - "controlID": "C-0036", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Malicious admission controller", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns validating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "CoreDNS poisoning", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral Movement" - ], - "controlTypeTags": [ - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "If attackers have permissions to modify the coredns ConfigMap, they can change the behavior of the cluster\u2019s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", - "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", - "long_description": "CoreDNS is a modular Domain Name System (DNS) server written in Go, hosted by the Cloud Native Computing Foundation (CNCF). CoreDNS is the main DNS service used in Kubernetes. The configuration of CoreDNS can be modified by a file named corefile. In Kubernetes, this file is stored in a ConfigMap object, located in the kube-system namespace. 
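As a hedged illustration of the kind of RBAC grant this control (C-0037) flags, a Role and RoleBinding that allow a subject to update or patch the 'coredns' ConfigMap in kube-system could look like the sketch below; all names are hypothetical:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: coredns-editor            # hypothetical name, for illustration only
  namespace: kube-system
rules:
- apiGroups: [""]                 # core API group, where ConfigMaps live
  resources: ["configmaps"]
  resourceNames: ["coredns"]
  verbs: ["update", "patch"]      # verbs that allow rewriting the corefile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: coredns-editor-binding    # hypothetical name
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: example-sa                # hypothetical subject
  namespace: kube-system
roleRef:
  kind: Role
  name: coredns-editor
  apiGroup: rbac.authorization.k8s.io

The long description continues below.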
If attackers have permissions to modify the ConfigMap, for example by using the container\u2019s service account, they can change the behavior of the cluster\u2019s DNS, poison it, and take the network identity of other services.", - "test": "Check who has update/patch RBAC permissions on \u2018coredns\u2019 configmaps, or to all configmaps.", - "controlID": "C-0037", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-update-configmap", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding", - "ConfigMap" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n#RoleBinding to Role\ndeny [msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# RoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n cautils.list_contains(rule.resources,\"configmaps\")\n }\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n }\n\n canModifyConfigMapResource(rule) {\n cautils.list_contains(rule.resources,\"configmaps\")\n cautils.list_contains(rule.resourceNames,\"coredns\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"update\")\n }\n\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"patch\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n }\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "rule-can-update-configmap-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", 
\"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Malicious admission controller (mutating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. 
This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", - "controlID": "C-0039", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "list-all-mutating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Malicious admission controller", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "SSH server running inside container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "long_description": "An SSH server that is running inside a container may be used by attackers. If attackers gain valid credentials to a container, whether by brute force attempts or by other methods (such as phishing), they can use it to get remote access to the container by SSH.", - "test": "Check if a service connected to some workload has an SSH port (22/2222). If so, we raise an alert. 
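For illustration, a Service of the kind this control (C-0042) would flag selects a workload's pods and exposes port 22 (or 2222). A minimal, purely hypothetical sketch:

apiVersion: v1
kind: Service
metadata:
  name: example-ssh-svc        # hypothetical name
  namespace: default
spec:
  selector:
    app: example-app           # must match the workload's pod labels
  ports:
  - name: ssh
    port: 22                   # SSH port exposed by the service; 2222 would also match
    targetPort: 22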
", - "controlID": "C-0042", - "baseScore": 3.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-can-ssh-to-pod", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n" - }, - { - "name": "rule-can-ssh-to-pod-v1", - "attributes": { - 
"microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := 
service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" - } - ] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying 
host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. 
This control identifies all the PODs using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n 
startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Instance Metadata API", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } - ] - }, - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "long_description": "Cloud providers provide instance metadata service for retrieving information about the virtual machine, such as network configuration, disks, and SSH public keys. This service is accessible to the VMs via a non-routable IP address that can be accessed from within the VM only. Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. For example, in Azure, the following request would retrieve all the metadata information of an instance: http:///metadata/instance?api-version=2019-06-01\\n\\n", - "test": "Check which nodes have access to instance metadata services. The check is for AWS, GCP and Azure.", - "controlID": "C-0052", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "instance-metadata-api-access", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "cloudProviderInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" - } - ] - }, - { - "name": "Access container service account", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. 
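As a hedged sketch of the "only necessary pods mount the SA token" guidance that follows in this remediation (C-0053), automounting of the service account token can be disabled at the ServiceAccount and pod level; all names and the image below are hypothetical:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-sa                       # hypothetical ServiceAccount
  namespace: default
automountServiceAccountToken: false      # no token is mounted unless a pod explicitly opts in
---
apiVersion: v1
kind: Pod
metadata:
  name: example-pod                      # hypothetical pod
  namespace: default
spec:
  serviceAccountName: example-sa
  automountServiceAccountToken: false    # explicit opt-out at the pod level as well
  containers:
  - name: app
    image: registry.example.com/app:latest   # hypothetical image

The remediation continues below.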
Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", - "long_description": "Service account (SA) represents an application identity in Kubernetes. By default, an SA is mounted to every created pod in the cluster. Using the SA, containers in the pod can send requests to the Kubernetes API server. Attackers who get access to a pod can access the SA token (located in /var/run/secrets/kubernetes.io/serviceaccount/token) and perform actions in the cluster, according to the SA permissions. If RBAC is not enabled, the SA has unlimited permissions in the cluster. If RBAC is enabled, its permissions are determined by the RoleBindings\\\\ClusterRoleBindings that are associated with it.", - "test": "Control checks if RBAC is enabled. If it's not, the SA has unlimited permissions. If RBAC is enabled, it lists all permissions for each SA.", - "controlID": "C-0053", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "access-container-service-account", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := 
serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := 
serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each 
Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n" - }, - { - "name": "access-container-service-account-v1", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n 
subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
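A minimal sketch of the remediation this control asks for, with a hypothetical namespace and policy name: a single default-deny NetworkPolicy in a namespace is enough for that namespace to pass the internal-networking rule below, since the rule only lists namespaces in which no NetworkPolicy is defined at all.

# Hypothetical default-deny NetworkPolicy for one namespace
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all        # hypothetical name
  namespace: my-namespace       # hypothetical namespace
spec:
  podSelector: {}               # empty selector: selects every pod in the namespace
  policyTypes:
    - Ingress
    - Egress                    # no allow rules are given, so all ingress and egress is denied

In practice, more specific allow policies would be layered on top of a default-deny policy like this.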
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN 
capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. 
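As an illustration of what this check flags (the manifest below is a hypothetical sketch, not taken from the rule definition): on a node running one of the affected kubelet versions, any container that mounts a volume using the subPath feature is reported, for example:

# Hypothetical pod using subPath; flagged by the check when the node's
# kubelet version is in one of the vulnerable ranges listed above
apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo                       # hypothetical name
spec:
  containers:
    - name: app                            # hypothetical container name
      image: registry.example/app:latest   # hypothetical image
      volumeMounts:
        - name: data
          mountPath: /var/data
          subPath: app-data                # the subPath usage the control looks for
  volumes:
    - name: data
      emptyDir: {}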
", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to 
CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0007", - "C-0012", - "C-0014", - "C-0015", - "C-0020", - "C-0021", - "C-0026", - "C-0031", - "C-0035", - "C-0036", - "C-0037", - "C-0039", - "C-0042", - "C-0045", - "C-0048", - "C-0052", - "C-0053", - "C-0054", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070" - ] -} \ No newline at end of file diff --git a/releaseDev/nsa.json b/releaseDev/nsa.json deleted file mode 100644 index 821100839..000000000 --- a/releaseDev/nsa.json +++ /dev/null @@ -1,2145 +0,0 @@ -{ - "name": "NSA", - "description": "Implement NSA security advices for K8s ", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "compliance" - ], - "version": null, - "controls": [ - { - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" - ] - }, - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using \u201ckubectl exec\u201d command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit \u201ckubectl exec\u201d command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "long_description": "Attackers who have permissions, can run malicious commands in containers in the cluster using exec command (\u201ckubectl exec\u201d). 
In this method, attackers can use legitimate images, such as an OS image (e.g., Ubuntu) as a backdoor container, and run their malicious code remotely by using \u201ckubectl exec\u201d.", - "test": "Check which subjects have RBAC permissions to exec into pods\u2013 if they have the \u201cpods/exec\u201d verb.", - "controlID": "C-0002", - "baseScore": 5.0, - "example": "@controls/examples/c002.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exec-into-container", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" - }, - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "long_description": "The control plane is the core of Kubernetes and gives users the ability to view containers, schedule new Pods, read Secrets, and execute commands in the cluster. Therefore, it should be protected. It is recommended to avoid control plane exposure to the Internet or to an untrusted network. The API server runs on ports 6443 and 8080. We recommend to block them in the firewall. Note also that port 8080, when accessed through the local machine, does not require TLS encryption, and the requests bypass authentication and authorization modules.", - "test": "Check if the insecure-port flag is set (in case of cloud vendor hosted Kubernetes service this verification will not be effective).", - "controlID": "C-0005", - "baseScore": 9, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "insecure-port-flag", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", 
[container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - } - ] - }, - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resource-policies", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace has no resource policies defined", - "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := 
wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" - } - ] - }, - { - "name": "Applications credentials in configuration files", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Attackers who have access to 
configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "long_description": "Developers store secrets in the Kubernetes configuration files, such as environment variables in the pod configuration. Such behavior is commonly seen in clusters that are monitored by Azure Security Center. Attackers who have access to those configurations, by querying the API server or by accessing those files on the developer\u2019s endpoint, can steal the stored secrets and use them.", - "test": "Check if the pod has sensitive information in environment variables, by using list of known sensitive key names. Check if there are configmaps with sensitive information.", - "controlID": "C-0012", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := 
data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n 
configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - } - ] - }, - { - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.", - "long_description": "Container engines allow containers to run applications as a non-root user with non-root group membership. Typically, this non-default setting is configured when the container image is built. . Alternatively, Kubernetes can load containers into a Pod with SecurityContext:runAsUser specifying a non-zero user. While the runAsUser directive effectively forces non-root execution at deployment, NSA and CISA encourage developers to build container applications to execute as a non-root user. Having non-root execution integrated at build time provides better assurance that applications will function correctly without root privileges.", - "test": "Verify if runAsUser and runAsGroup are set to a user id greater than 999. Check that the allowPrivilegeEscalation field is set to false. Check all the combinations with PodSecurityContext and SecurityContext (for containers).", - "controlID": "C-0013", - "baseScore": 6.0, - "example": "@controls/examples/c013.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := 
choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : 
false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - } - ] - }, - { - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "test": " Check that the allowPrivilegeEscalation field in securityContext of container is set to false. 
", - "controlID": "C-0016", - "baseScore": 6.0, - "example": "@controls/examples/allowprivilegeescalation.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-allow-privilege-escalation", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not 
container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. 
", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - } - ] - }, - { - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" - ] - }, - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "long_description": "Network policies control traffic flow between Pods, namespaces, and external IP addresses. By default, no network policies are applied to Pods or namespaces, resulting in unrestricted ingress and egress traffic within the Pod network. Pods become isolated through a network policy that applies to the Pod or the Pod\u2019s namespace. Once a Pod is selected in a network policy, it rejects any connections that are not specifically allowed by any applicable policy object.Administrators should use a default policy selecting all Pods to deny all ingress and egress traffic and ensure any unselected Pods are isolated. Additional policies could then relax these restrictions for permissible connections.", - "test": "Check for each Pod whether there is an ingress and egress policy defined (whether using Pod or Namespace). 
", - "controlID": "C-0030", - "baseScore": 6.0, - "example": "@controls/examples/c030.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ingress-and-egress-blocked", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, 
networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" - } - ] - }, - { - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "long_description": "We have it in Armo best (Automatic mapping of service account token).", - "test": "Check all service accounts on which automount is not disabled. 
Check all workloads on which they and their service account don't disable automount ", - "controlID": "C-0034", - "baseScore": 6.0, - "example": "@controls/examples/c034.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "automount-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) 
{\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Cluster-admin binding", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "long_description": "Role-based access control (RBAC) is a key security feature in Kubernetes. RBAC can restrict the allowed actions of the various identities in the cluster. Cluster-admin is a built-in high privileged role in Kubernetes. Attackers who have permissions to create bindings and cluster-bindings in the cluster can create a binding to the cluster-admin ClusterRole or to other high privileges roles.", - "test": "Check which subjects have cluster-admin RBAC permissions \u2013 either by being bound to the cluster-admin clusterrole, or by having equivalent high privileges. ", - "controlID": "C-0035", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-list-all-cluster-admins", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n 
}\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := 
sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } - ] - }, - { - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "long_description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "controlID": "C-0038", - "baseScore": 7.0, - "example": "@controls/examples/c038.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-pid-ipc-privileges", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. 
Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n 
path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." 
- } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "Cluster internal networking", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ] - }, - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "long_description": "Kubernetes networking behavior allows traffic between pods in the cluster as a default behavior. 
Attackers who gain access to a single container may use it for network reachability to another container in the cluster.", - "test": "Check for each namespace if there is a network policy defined.", - "controlID": "C-0054", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "long_description": "In order to reduce the attack surface, it is recommend, when it is possible, to harden your application using security services such as SELinux\u00ae, AppArmor\u00ae, and seccomp. Starting from Kubernetes version 22, SELinux is enabled by default. ", - "test": "Check if there is AppArmor or Seccomp or SELinux or Capabilities are defined in the securityContext of container and pod. 
If none of these fields are defined for both the container and pod, alert.", - "controlID": "C-0055", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "linux-hardening", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot 
obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - } - ] - }, - { - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: privileged\nspec:\n containers:\n - name: pause\n image: k8s.gcr.io/pause\n securityContext:\n privileged: true # This field triggers failure!\n", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "long_description": "A privileged container is a container that has all the capabilities of the host machine, which lifts all the limitations regular containers have. Practically, this means that privileged containers can do almost every action that can be performed directly on the host. Attackers who gain access to a privileged container or have permissions to create a new privileged container (by using the compromised pod\u2019s service account, for example), can get access to the host\u2019s resources.", - "test": "Check in POD spec if securityContext.privileged == true, if so raise an alert.", - "controlID": "C-0057", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := 
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - } - ] - }, - { - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "controlID": "C-0058", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - } - ] - }, - { - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - 
"compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "test": "The control checks if the nginx-ingress-controller contains the ability to disable allowSnippetAnnotations and that indeed this feature is turned off", - "controlID": "C-0059", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - } - ] - }, - { - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ] - }, - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "long_description": "etcd is a consistent and highly-available key value store used as Kubernetes' backing store for all cluster data. All object data in Kubernetes, like secrets, are stored there. 
This is the reason why it is important to protect the contents of etcd and use its data encryption feature.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if etcd encryption is enabled", - "controlID": "C-0066", - "baseScore": 6.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - } - ] - }, - { - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "long_description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if audit logging is enabled", - "controlID": "C-0067", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, 
types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "long_description": "Pod Security Policies enable fine-grained authorization of pod creation and updates and it extends authorization beyond RBAC. 
It is an important to use PSP to control the creation of sensitive PODs in your cluster.", - "test": "Reading the cluster description from the managed cloud API (EKS, GKE), or the API server pod configuration for native K8s and checking if PSP is enabled", - "controlID": "C-0068", - "baseScore": 1.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "psp-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - } - ] - }, - { - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "long_description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of 
system:anonymous and a group of system:unauthenticated.", - "test": "Reading the kubelet command lines and configuration file looking for anonymous-auth configuration. If this configuration is set on both, the command line values take precedence over it.", - "controlID": "C-0069", - "baseScore": 10.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
{\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - }, - { - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "long_description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "test": "Reading the kubelet command lines and configuration file looking for client TLS configuration.", - "controlID": "C-0070", - "baseScore": 9.0, - "scanningScope": { - "matches": [ - "cluster" - ] - }, - "rules": [ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 
6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0002", - "C-0005", - "C-0009", - "C-0012", - "C-0013", - "C-0016", - "C-0017", - "C-0030", - "C-0034", - "C-0035", - "C-0038", - "C-0041", - "C-0044", - "C-0046", - "C-0054", - "C-0055", - "C-0057", - "C-0058", - "C-0059", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070" - ] -} \ No newline at end of file diff --git a/releaseDev/rules.json b/releaseDev/rules.json deleted file mode 100644 index ca7448d03..000000000 --- a/releaseDev/rules.json +++ /dev/null @@ -1,8953 +0,0 @@ -[ - { - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", - "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Use individual service account credentials for each controller.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. 
When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "CVE-2022-23648", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You 
are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n" - }, - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Turn on Role Based Access Control.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": 
[],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "resources-other1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ControllerRevision" - ] - }, - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - }, - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - }, - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Set global request timeout for API server requests as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 
'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n" - }, - { - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-clusters-are-created-with-private-nodes", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", - "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the rate at which the API server accepts requests.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
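The host-sensor permission rules in this set, including the PKI key check that follows, compare the collected file mode against `allowed_perms := 384` (0o600 in decimal) through `cautils.unix_permissions_allow`, a helper that is imported but not defined in this file. A minimal sketch of such a check, under the assumption that the mode arrives as a plain integer and that "allowed" means "no permission bit outside the allowed mask", could look like this:

```
package example

# Hypothetical stand-in for cautils.unix_permissions_allow: the actual mode
# must not set any permission bit that the allowed mask does not grant.
unix_permissions_allow(allowed, actual) {
	bits.and(actual, allowed) == actual
}

# 384 == 0o600 (rw-------), 256 == 0o400 (r--------), 420 == 0o644 (rw-r--r--)
test_0600_is_allowed { unix_permissions_allow(384, 384) }
test_0400_is_allowed { unix_permissions_allow(384, 256) }
test_0644_is_rejected { not unix_permissions_allow(384, 420) }
```

The test rules are only illustrative and run against this hypothetical helper, not the real `cautils` package. The remediation itself is a plain chmod on the affected files.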
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "alert-mount-potential-credentials-paths", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "relevantCloudProviders": [ - "EKS", - "GKE", - "AKS" - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# 
get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n" - }, - { - "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain the logs for at least 30 days or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, 
\"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - }, - { - "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = 
sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "insecure-port-flag", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is set to 0", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n" - }, - { - "name": "etcd-peer-auto-tls-disabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default 
Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "rule-excessive-delete-rights-v1", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 
:= array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "exposed-rce-pods", - "attributes": { - "m$K8sThreatMatrix": "exposed-rce-pods", - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.0.150", - "imageScanRelated": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service", - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "description": "fails if known pods have exposed services and known vulnerabilities with remote code execution", - "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # At least one rce vulnerability\n filter_rce_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_rce_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.categories.isRce == true\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "resourceEnumerator": "package armo_builtins\n \ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ; x.apiVersion == \"v1\"]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ; x.apiVersion == \"v1\"]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"] # TODO: x.apiVersion == \"--input--\" || x.apiVersion == \"--input--\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n 
service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}" - }, - { - "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "rule-access-dashboard-subject-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not 
dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}" - }, - { - "name": "psp-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}" - }, - { - "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n" - }, - { - "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", - "remediation": "Enable Azure Defender image scanning. 
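The check itself, shown below, only inspects one boolean inside the AKS `ClusterDescribe` payload; a quick illustration with an invented payload (the field path is the one the rule reads):

```
package example

# Field path follows the rule below; the surrounding object is invented.
isAzureImageScanningEnabled(properties) {
	properties.securityProfile.defender.securityMonitoring.enabled == true
}

test_defender_enabled {
	isAzureImageScanningEnabled({"securityProfile": {"defender": {"securityMonitoring": {"enabled": true}}}})
}

test_defender_disabled_is_flagged {
	not isAzureImageScanningEnabled({"securityProfile": {"defender": {"securityMonitoring": {"enabled": false}}}})
}
```

Enabling Defender is a single CLI update on the cluster.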
Command: az aks update --enable-defender --resource-group --name ", - "ruleQuery": "armo_builtin", - "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n" - }, - { - "name": "pod-security-admission-restricted-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
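The namespace check in the rule below reduces to a single label lookup, equivalent to the some-key/value iteration it uses; the enforcement itself happens in the admission controller, which is where the operational risk described here comes from. A sketch with invented namespaces (the label key and value are the ones the rule checks):

```
package example

# Equivalent single-lookup form of the label test in the rule below.
restricted_admission_policy_enabled(namespace) {
	namespace.metadata.labels["pod-security.kubernetes.io/enforce"] == "restricted"
}

# Invented namespace objects for illustration.
test_labeled_namespace_passes {
	restricted_admission_policy_enabled({"kind": "Namespace", "metadata": {"name": "prod", "labels": {"pod-security.kubernetes.io/enforce": "restricted"}}})
}

test_unlabeled_namespace_is_flagged {
	not restricted_admission_policy_enabled({"kind": "Namespace", "metadata": {"name": "dev", "labels": {}}})
}
```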
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "cluster-admin-role", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - 
"apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [], - "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n" - }, - { - "name": "automount-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the 
following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod 
spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Retain 10 or an appropriate number of old log files.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": 
[path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - }, - { - "name": "has-image-signature", - "attributes": { - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensures that all images contain some signature", - "remediation": "Replace the image with a signed image", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := 
apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-can-list-get-secrets", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can 
list/get secrets\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "etcd-client-auth-cert", - "attributes": { 
- "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Enable client authentication on etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Malicious admission controller", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns validating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}" - }, - 
{ - "name": "etcd-peer-tls-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "rule-list-all-cluster-admins", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - 
"packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "rule-can-delete-k8s-events", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : 
[subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}" - }, - { - "name": "ensure-network-policy-is-enabled-eks", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := 
{\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n" - }, - { - "name": "kubelet-rotate-certificates", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --rotate-certificates argument is not set to false.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", - 
"attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Automate service accounts management.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "resource-policies", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - 
"resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace has no resource policies defined", - "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu 
limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" - }, - { - "name": "kubelet-ip-tables", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "workload-mounted-secrets", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Secret" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == 
volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "set-systctls-params", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.systctls is not set.", - "remediation": "Set securityContext.systctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := 
sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}" - }, - { - "name": "etcd-unique-ca", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", - "ruleQuery": 
"armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := regex.split(\"=\", command)\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n" - }, - { - "name": "rule-deny-cronjobs", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", - "armoBuiltin": true - }, - "ruleLanguage": "rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if it's cronjob", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n" - }, - { - "name": "etcd-auto-tls-disabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Do not use self-signed certificates for TLS.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = 
input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "access-container-service-account", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns 
for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, 
wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", 
[wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n" - }, - { - "name": "anonymous-access-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in case anonymous access is enabled on the cluster", - "remediation": "Disable anonymous access by passing the --anonymous-auth=false flag to the kube-apiserver component, or if it's a managed cluster, you can remove any RBAC rules which allow anonymous users to perform actions", - "ruleQuery": "armo_builtins", 
- "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n\n isAnonymous(rolebinding)\n\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:anonymous\"\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:unauthenticated\"\n}\n" - }, - { - "name": "ensure-aws-policies-are-present", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "fails if aws policies are not found", - "remediation": "Implement policies to minimize user access to Amazon ECR", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n" - }, - { - "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable certificate based kubelet authentication.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "automount-default-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot 
service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n" - }, - { - "name": "alert-container-optimized-os-not-in-use", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". \n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. \n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}" - }, - { - "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "set-fsgroup-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": 
sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not 
fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "exec-into-container", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; 
rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1\n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}" - }, - { - "name": "psp-deny-privileged-container", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[psp]},\n\t}\n}\n" - }, - { - "name": "psp-deny-hostnetwork", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return al the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "drop-capability-netraw", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot 
isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n" - }, - { - "name": "kubelet-protect-kernel-defaults", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if the --protect-kernel-defaults argument is set to true.", - "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", - "ruleQuery": "", - "rule": "package 
armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "rule-excessive-delete-rights", - "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# fails if user can can delete important resources\n#RoleBinding 
to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, 
\"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n" - }, - { - "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "resources-secret-in-default-namespace", - "attributes": { - 
"armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Secret" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "rule-can-bind-escalate", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can or bind escalate roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "validate-kubelet-tls-configuration-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, 
\"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t#get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "resources-rbac-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - 
"rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verify kubelet's certificate before establishing connection.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 
0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rbac-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "container.googleapis.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS", - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny addmission controller is not enabled. 
This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "kubelet-rotate-kubelet-server-certificate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n" - }, - { - "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not always authorize all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-hostile-multitenant-workloads", - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "ruleDependencies": [], - "configInputs": [], - "controlConfigInputs": [], - "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. 
The security domain for Kubernetes becomes the entire cluster, not an individual node.", - "remediation": "Use physically isolated clusters", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n" - }, - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be 
provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured with a service account public key file (--service-account-key-file)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "exposed-critical-pods", - "attributes": { - "m$K8sThreatMatrix": "exposed-critical-pods", - "armoBuiltin": true, - "imageScanRelated": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service", - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "description": "Fails if pods have exposed services as well as critical vulnerabilities", - "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n \n container.image == vuln.metadata.name\n\n # At least one critical vulnerabilities\n filter_critical_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_critical_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.severity == \"Critical\"\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = 
pod.metadata.labels[_]; x == service_selectors[_] ])\n}" - }, - { - "name": "configured-liveness-probe", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Liveness probe is not configured", - "remediation": "Ensure Liveness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your 
system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "list-all-namespaces", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - } - ], - "ruleDependencies": [], - "description": "lists all namespaces for users to review", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := 
data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - }, - { - "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", 
\"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "excessive_amount_of_vulnerabilities_pods", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed critical vulnerable pods", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133", - "imageScanRelated": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "configInputs": [ - "settings.postureControlInputs.max_critical_vulnerabilities", - "settings.postureControlInputs.max_high_vulnerabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.max_critical_vulnerabilities", - "name": "Max critical vulnerabilities", - "description": "Maximum amount of allowed critical risk vulnerabilities" - }, - { - "path": "settings.postureControlInputs.max_high_vulnerabilities", - "name": "Max high vulnerabilities", - "description": "Maximum amount of allowed high risk vulnerabilities" - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # get 
container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # Has ^ amount of vulnerabilities\n check_num_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"Critical\" ])\n\n str_max := data.postureControlInputs.max_critical_vulnerabilities[_]\n exists > to_number(str_max)\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"High\" ])\n\n str_max := data.postureControlInputs.max_high_vulnerabilities[_]\n exists > to_number(str_max)\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n \n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}" - }, - { - "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-access-dashboard", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 
9,\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - }, - { - "name": "rule-can-portforward", - "attributes": { - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": 
[path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"get\")\n}\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/portforward\")\n}\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/*\")\n}\ncanForwardToPodResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n" - }, - { - "name": "K8s common labels usage", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.k8sRecommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.k8sRecommendedLabels", - "name": "Kubernetes Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following kubernetes recommended labels." 
- } - ], - "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := 
recommended_labels[_]\n\tlabels[recommended_label]\n}\n" - }, - { - "name": "label-usage-for-resources", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.recommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.recommendedLabels", - "name": "Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following labels." - } - ], - "description": "check if a certain set of labels is defined, this is a configurable control. Initial list: app, tier, phase, version, owner, env.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n \n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := 
no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n" - }, - { - "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - }, - { - "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "workload-mounted-pvc", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts PVC", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result 
{\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-service-principle-has-read-only-permissions", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - }, - { - "name": "pod-security-admission-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of 
the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "set-seLinuxOptions", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does 
not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "rule-identify-blocklisted-image-registries", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.publicRegistries", - "name": "Public registries", - "description": "Kubescape checks none of these public registries are in use." - }, - { - "path": "settings.postureControlInputs.untrustedRegistries", - "name": "Registries block list", - "description": "Kubescape checks none of the following registries are in use." 
- } - ], - "description": "Identifying if pod container images are from unallowed registries", - "remediation": "Use images from safe registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}" - }, - { - "name": "etcd-tls-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Configure TLS encryption for the etcd service.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default Value\nBy default, TLS encryption is not set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = 
input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "CNIInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "CVE-2022-3172", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apiregistration.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "APIService" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "apiserverinfo.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", - "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n" - }, - { - "name": "ingress-and-egress-blocked", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - 
"apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= 
networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 
0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}" - }, - { - "name": "ensure-external-secrets-storage-is-in-use", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n" - }, - { - "name": "psp-required-drop-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - 
"description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return al the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n" - }, - { - "name": "rule-can-update-configmap", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding", - "ConfigMap" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n#RoleBinding to Role\ndeny [msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# RoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n 
canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n cautils.list_contains(rule.resources,\"configmaps\")\n }\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n }\n\n canModifyConfigMapResource(rule) {\n cautils.list_contains(rule.resources,\"configmaps\")\n cautils.list_contains(rule.resourceNames,\"coredns\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"update\")\n }\n\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"patch\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n }\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "psp-deny-hostipc", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == 
\"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == 
\"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "insecure-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." - } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | 
capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - }, - { - "name": "resources-other2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress", - "NetworkPolicy" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - }, - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "image-pull-policy-is-not-set-to-always", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", 
[format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n" - }, - { - "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot 
cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "ensure-azure-rbac-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", - "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. 
Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n" - }, - { - "name": "instance-metadata-api-access", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "cloudProviderInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}" - }, - { - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, 
api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", 
[cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not allow all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "set-procmount-default", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": 
[ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) 
{\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" - }, - { - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "restrict-access-to-the-control-plane-endpoint", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "false", - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", 
- "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n" - }, - { - "name": "k8s-audit-logs-enabled-native-cis", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Do not allow all requests. 
Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location 
on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "psp-deny-allowed-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Enable 
kubelet server certificate rotation on controller-manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "alert-fargate-not-in-use", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if it's name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "ruleDependencies": [], - "relevantCloudProviders": [ - "EKS" - ], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := 
\"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := \"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := \tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n" - }, - { - "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", 
[count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - }, - { - "name": "naked-pods", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", - "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. Example command: kubectl create deployment nginx-depl --image=nginx:1.19", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n" - }, - { - "name": "sudo-in-container-entrypoint", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, beggining_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n" - }, - { - "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "kubelet-event-qps", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "set-seccomp-profile-RuntimeDefault", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not 
define seccompProfile as RuntimeDefault", - "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != 
\"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n" - }, - { - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - }, - { - "name": "CVE-2022-39328", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := 
deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - }, - { - "name": "ensure-endpointprivateaccess-is-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n" - }, - { - "name": "list-all-mutating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Malicious admission controller", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == 
\"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}" - }, - { - "name": "namespace-without-service-account", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not disable the secure port.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "review-roles-with-aws-iam-authenticator", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n" - }, - { - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", 
fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` it not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) 
== 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "ensure-image-scanning-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "DescribeRepositories" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}" - }, - { - "name": "external-secret-storage", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n" - }, - { - "name": "verify-image-signature", - "attributes": { - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.1.3" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "ruleQuery": "armo_builtins", - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.trustedCosignPublicKeys", - "name": "Trusted Cosign public keys", - "description": "Trusted Cosign public keys" - } - ], - "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := 
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n" - }, - { - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
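To illustrate the allow-list check described above, here is a minimal, standalone Rego sketch (not the shipped rule; the actual implementation, with its `docker_host_wrapper` and `regexify` helpers, appears in the rule body below, and the `allow_list` values here are hypothetical): an image reference without a registry host is qualified with `docker.io`, then compared against each allowed prefix.

```
package example

# Hypothetical allow list, for illustration only.
allow_list := ["quay.io/", "docker.io/myorg/"]

# An image is allowed if its host-qualified reference starts with an allowed prefix.
image_allowed(image) {
	registry := allow_list[_]
	startswith(with_default_host(image), registry)
}

# Images with no "/" are assumed to come from Docker Hub ("docker.io").
with_default_host(image) = result {
	not contains(image, "/")
	result := sprintf("docker.io/%s", [image])
} else = image
```

With these sample values, `image_allowed("nginx:1.25")` does not hold (the image resolves to `docker.io/nginx:1.25`), while `image_allowed("quay.io/argoproj/argocd:v2.4.0")` holds; the shipped rule performs the same comparison with an anchored regex instead of `startswith`.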
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n" - }, - { - "name": "exposed-sensitive-interfaces-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - 
"apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.sensitiveInterfaces" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveInterfaces", - "name": "Sensitive interfaces", - "description": "The following interfaces were seen exploited. Kubescape checks it they are externally exposed." - } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": 
[service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}", - "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}" - }, - { - "name": "linux-hardening", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n" - }, - { - "name": "psp-deny-root-container", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == 
\"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}" - }, - { - "name": "access-container-service-account-v1", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}" - }, - { - "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - 
"remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of 
workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " - }, - { - "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 
2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n" - }, - { - "name": "container-hostPort", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == 
\"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - }, - { - "name": "resources-cpu-limit-and-request", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.cpu_request_max", - "settings.postureControlInputs.cpu_request_min", - "settings.postureControlInputs.cpu_limit_min", - "settings.postureControlInputs.cpu_limit_max" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.cpu_request_max", - "name": "cpu_request_max", - "description": "Ensure CPU max requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_request_min", - "name": "cpu_request_min", - "description": "Ensure CPU min requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_max", - "name": "cpu_limit_max", - "description": "Ensure CPU max limits are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_min", - "name": "cpu_limit_min", - "description": "Ensure CPU min limits are set" - } - ], - "description": "CPU limits and requests are not set.", - "remediation": "Ensure CPU limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not have container with CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": 
\"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n###################################################################################################################\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n \tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n#################################################################################################################3\n\nrequest_or_limit_cpu(container) {\n\tcontainer.resources.limits.cpu\n\tcontainer.resources.requests.cpu\n}\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resouces.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - }, - { - "name": "read-only-port-enabled-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - 
], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "host-pid-ipc-privileges", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. 
Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}" - }, - { - "name": "resources-core1-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap", - "Endpoints", - "LimitRange", - "PersistentVolumeClaim", - "PodTemplate" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Activate garbage collector on pod termination, as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result 
{\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n" - }, - { - "name": "CVE-2022-0492", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Case 1: \n# -\tContainer runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND\n# -\tNo AppArmor , AND\n# -\tNo SELinux, AND\n# -\tNo Seccomp\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\t\n\t# Path to send\n\tbeggining_of_path := \"spec\"\n\t\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec\"\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n # If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# 
CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\n\tpod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n \n \t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################\n# Case 2: \n# - Container has CAP_DAC_OVERRIDE capability, AND\n# - No AppArmor, AND\n# - No SELinux\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.\"\n\t\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec.\"\n\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n \n pod := 
wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\n \tresult := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\nis_cap_sys_admin(container, beggining_of_path) {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"SYS_ADMIN\"\n}\n\nisCAP_DAC_OVERRIDE(container, beggining_of_path, i) = path {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"DAC_OVERRIDE\"\n path = sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) \n}\n\n\n\n#################################################################################\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n\n\n\n\n#################################################################################\n\n# Check if appArmor or SELinux or seccompProfile is used\n# Fails if none of them is used\nis_no_SELinux_No_AppArmor_Pod(pod){\n not pod.spec.securityContext.seLinuxOptions\n\tannotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tnot count(annotations) > 0\n}\n\nis_no_SELinux_container(container){\n not container.securityContext.seLinuxOptions\n}\n\nis_no_seccomp_pod(pod) {\n not pod.spec.securityContext.seccompProfile\n}\n\nis_no_Seccomp_Container(container) {\n not container.securityContext.seccompProfile\n}\n\n\n\n\n\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n\n# Checking for non-root and allowPrivilegeEscalation enabled\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot 
{\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.containers[container_ndx].runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := 
sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n" - }, - { - "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable profiling, if not needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. 
This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "rule-cni-enabled-aks", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n" - }, - { - "name": "psp-deny-hostpid", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" 
- ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return all the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "set-seccomp-profile", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container level.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "etcd-encryption-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n" - }, - { - "name": "rule-can-delete-k8s-events-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", 
[rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "exposure-to-internet", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "extensions", - "networking.k8s.io" - ], - "apiVersions": [ - "v1beta1", - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := 
{\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" - }, - { - "name": "psp-deny-allowprivilegeescalation", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n" - }, - { - "name": "psp-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security 
policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}" - }, - { - "name": "rule-can-ssh-to-pod-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": 
wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n" - }, - { - "name": "ensure_network_policy_configured_in_labels", - 
"attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "description": "fails if no networkpolicy configured in workload labels", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot 
metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}" - }, - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := 
is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" - }, - { - "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n" - }, - { - "name": "resources-event-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "events.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "rule-secrets-in-env-var", - "attributes": { - "armoBuiltin": true - }, - 
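These rules all share the same evaluation contract: `input` is an array of Kubernetes API objects, every `deny[msga]` clause contributes one alert, and each alert carries `failedPaths` for fields that exist but are wrong plus `fixPaths` for fields that should be added. The following minimal sketch mirrors that convention, loosely modelled on the default-namespace check above; the package name `armo_builtins_example` and the alert wording are illustrative assumptions, not part of this change.

```
# Illustrative sketch only: the deny/msga convention used by the rules in
# this file, not an exact copy of any shipped rule.
package armo_builtins_example

# Flag any object that is explicitly placed in the "default" namespace.
deny[msga] {
	resource := input[_]
	resource.metadata.namespace == "default"

	msga := {
		"alertMessage": sprintf("%v: %v is in the 'default' namespace", [resource.kind, resource.metadata.name]),
		"packagename": "armo_builtins_example",
		"alertScore": 3,
		"failedPaths": ["metadata.namespace"],  # field exists and is wrong
		"fixPaths": [],
		"alertObject": {"k8sApiObjects": [resource]},
	}
}

# Flag objects that have no namespace at all: there is nothing to point at,
# so the remediation is expressed as a fixPaths entry instead of failedPaths.
deny[msga] {
	resource := input[_]
	not resource.metadata.namespace

	msga := {
		"alertMessage": sprintf("%v: %v has no namespace set", [resource.kind, resource.metadata.name]),
		"packagename": "armo_builtins_example",
		"alertScore": 3,
		"failedPaths": [],
		"fixPaths": [{"path": "metadata.namespace", "value": "YOUR_NAMESPACE"}],
		"alertObject": {"k8sApiObjects": [resource]},
	}
}
```

A policy like this can be exercised locally with something like `opa eval --format pretty -d example.rego -i input.json "data.armo_builtins_example.deny"`, where `input.json` holds an array of Kubernetes objects; `example.rego` and `input.json` are hypothetical file names used only for this sketch.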
"ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "etcd-peer-client-auth-cert", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "etcd should be configured for peer authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n" - }, - { - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security 
admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}" - }, - { - "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the 
below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := 
sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "rule-can-update-configmap-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", 
[rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "rule-access-dashboard-wl-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": 
[\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "list-role-definitions-in-acr", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "management.azure.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - } - ], - "relevantCloudProviders": [ - "AKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n" - }, - { - "name": "CVE-2022-24348", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, 
\":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n" - }, - { - "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", - "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-manual", - "attributes": { - "armoBuiltin": true, - "actionRequired": "manual review", - "hostSensorRule": false, - "imageScanRelated": false - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", - "remediation": "", - "ruleQuery": "", - "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": 
[],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}" - }, - { - "name": "containers-mounting-docker-socket", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n" - }, - { - "name": "workload-mounted-configmap", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts ConfigMaps", - "remediation": "", - 
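The containers-mounting-docker-socket rule above repeats the same hostPath comparison once per workload shape because the volume list lives at a different path for Pods, template-based workloads and CronJobs. The sketch below reduces that check to a single helper over a plain volume list so the comparison and the `failedPaths` index formatting are easier to follow; the package name, helper name and alert wording are assumptions for illustration and are not taken from this file.

```
# Illustrative sketch only: the Docker-socket hostPath check collapsed into
# one helper; the shipped rule keeps one deny clause per workload kind.
package armo_builtins_example

# hostPath values that expose the container runtime.
docker_paths := {"/var/run/docker.sock", "/var/run/docker"}

# Returns one failedPaths entry per offending volume, relative to a spec
# prefix such as "spec." or "spec.template.spec.".
docker_socket_paths(volumes, prefix) = paths {
	paths := [sprintf("%vvolumes[%v].hostPath.path", [prefix, format_int(i, 10)]) |
		volume := volumes[i]
		docker_paths[volume.hostPath.path]
	]
}

deny[msga] {
	pod := input[_]
	pod.kind == "Pod"
	paths := docker_socket_paths(pod.spec.volumes, "spec.")
	count(paths) > 0

	msga := {
		"alertMessage": sprintf("pod: %v mounts the Docker socket", [pod.metadata.name]),
		"packagename": "armo_builtins_example",
		"alertScore": 5,
		"failedPaths": paths,
		"fixPaths": [],
		"alertObject": {"k8sApiObjects": [pod]},
	}
}
```

Keeping one `deny` clause per workload kind, as the rule in this file does, is what lets each alert report a `failedPaths` entry rooted at the correct prefix (`spec.`, `spec.template.spec.` or `spec.jobTemplate.spec.template.spec.`).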
"ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - }, - { - "name": "rbac-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if psp is 
enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}" - }, - { - "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Do not use token based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication cannot be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the API server --token-auth-file argument is set (static token based authentication is enabled)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-can-impersonate-users-groups-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", 
\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "exposed-sensitive-interfaces", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.servicesNames" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.servicesNames", - "name": "Service names", - "description": "Kubescape will look for the following services that exposes sensitive interfaces of common K8s projects/applications" - } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\tresult := wl_connectedto_service(wl, service)\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n 
services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tresult := wl_connectedto_service(pod, service)\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod, service]\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n\tresult := wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n" - }, - { - "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := 
{\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "configured-readiness-probe", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Readiness probe is not configured", - "remediation": "Ensure Readiness probe is configured", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "rule-can-impersonate-users-groups", - 
"attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "resourceCount": "subjects", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"impersonate\")\n}\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"*\")\n}\n\n\ncanImpersonateResource(rule) 
{\n\tcautils.list_contains(rule.resources,\"users\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"serviceaccounts\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"groups\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"uids\")\n}\n\ncanImpersonateResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}" - }, - { - "name": "rule-can-ssh-to-pod", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n 
\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n" - }, - { - "name": "ensure-default-service-accounts-has-only-default-roles", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. \ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n" - }, - { - "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "CVE-2022-0185", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "LinuxKernelVariables" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n kernel_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", node.status.nodeInfo.kernelVersion, -1)\n kernelVersion := kernel_version_match[0][0]\n \n kernel_version_arr := split(kernelVersion, \".\")\n to_number(kernel_version_arr[0]) == 5\n to_number(kernel_version_arr[1]) >= 1\n to_number(kernel_version_arr[1]) <= 16\n to_number(kernel_version_arr[2]) < 2 \n \n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": 
external_vector\n            },\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n            \"fixPaths\":[],\n\t}\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n    linux_kernel_var.value == \"1\\n\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n    node.kind == \"Node\"\n    \n    node.status.nodeInfo.operatingSystem == \"linux\"\n\n    linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n    linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n    external_vector := {\n        \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n        \"kernelVersion\": node.status.nodeInfo.kernelVersion\n    }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n            \"externalObjects\": external_vector\n            },\n\t\t\t\"failedPaths\": [],\n            \"fixPaths\":[],\n\t}\n}\n" - }, - { - "name": "CVE-2022-47633", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "description": "fails if the image version of Kyverno is vulnerable to CVE-2022-47633 (signature verification bypass)", - "remediation": "Upgrade the Kyverno image to version v1.8.5 or above", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n    msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n                \"k8sApiObjects\": [deployment]\n            },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n    msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n                \"k8sApiObjects\": [deployment]\n            },\n\t\t}\n}\n" - }, - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can 
run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, 
beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": 
\"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - }, - { - "name": "serviceaccount-token-mount", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n beggining_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, beggining_of_path, [])\n\n wl_namespace := wl.metadata.namespace\n result := is_sa_auto_mounted(spec, beggining_of_path, wl_namespace)\n \n sa := input[_]\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata.namespace , wl_namespace)\n has_service_account_binding(sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"failedPaths\": 
failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = beggining_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n beggining_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"Pod\"\n beggining_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"CronJob\"\n beggining_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n paths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n" - }, - { - "name": 
"ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Reject creating objects in a namespace that is undergoing termination.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": 
[ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n" - }, - { - "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not 
found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n" - }, - { - "name": "resources-memory-limit-and-request", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.memory_request_max", - "settings.postureControlInputs.memory_request_min", - "settings.postureControlInputs.memory_limit_max", - "settings.postureControlInputs.memory_limit_min" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.memory_request_max", - "name": "memory_request_max", - "description": "Ensure memory max requests are set" - }, - { - "path": "settings.postureControlInputs.memory_request_min", - "name": "memory_request_min", - "description": "Ensure memory min requests are set" - }, - { - "path": "settings.postureControlInputs.memory_limit_max", - "name": "memory_limit_max", - "description": "Ensure memory max limits are set" - }, - { - "path": "settings.postureControlInputs.memory_limit_min", - "name": "memory_limit_min", - "description": "Ensure memory min limits are set" - } - ], - "description": "memory limits and requests are not set.", - "remediation": "Ensure memory limits and requests are set.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not have container with memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with 
memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nrequest_or_limit_memory(container) {\n\tcontainer.resources.limits.memory\n\tcontainer.resources.requests.memory\n}\n\n######################################################################################################\n\n# Fails if pod exceeds memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\nis_min_max_exceeded_memory(container) = \"resources.limits.memory\" {\n\tmemory_limit := 
container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n} else = \"resources.requests.memory\" {\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n} else = \"\" {\n\ttrue\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max := data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check whether the value uses a known unit suffix\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n" - }, - { - "name": "rule-allow-privilege-escalation", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "*" - ], - "resources": [ - 
"PodSecurityPolicy" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container allows privilege escalation", - "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == 
false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "rule-can-create-pod", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "audit-policy-content", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n#rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must 
contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource must have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit levels\n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specific resource with the valid ones; if the counts differ,\n\t# it means that there are also some rules with an invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}" - }, - { - "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Validate service account before validating token.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "exec-into-container-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": 
"Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - }, - { - "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Always pull images.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. 
This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "kubelet-hostname-override", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := 
kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n" - }, - { - "name": "resources-core2-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ReplicationController", - "ResourceQuota", - "ServiceAccount", - "Service" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n" - }, - { - "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "description": "Encrypt etcd key-value store.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n" - }, - { - "name": "kubelet-strong-cryptographics-ciphers", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - 
"description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", - "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) 
{\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n" - }, - { - "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "rule-identify-old-k8s-registry", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Identifying if pod container images are from deprecated K8s registry", - "remediation": "Use images new registry", - "ruleQuery": "", - "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\n" - }, - { - "name": "host-network-access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - 
"apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - }, - { - "name": "k8s-audit-logs-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ClusterDescribe" - ] - } - ], - "relevantCloudProviders": [ - "EKS", - "GKE" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": 
[],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n" - }, - { - "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", - "attributes": { - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.2.5" - }, - "ruleLanguage": "Rego", - "dynamicMatch": [ - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ListEntitiesForPolicies" - ] - }, - { - "apiGroups": [ - "eks.amazonaws.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PolicyVersion" - ] - } - ], - "relevantCloudProviders": [ - "EKS" - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == 
\"eks\"\n\n\t#node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := [\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n" - }, - { - "name": "rule-can-portforward-v1", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == 
subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n" - } -] \ No newline at end of file diff --git a/releaseDev/security.json b/releaseDev/security.json deleted file mode 100644 index 473f2af62..000000000 --- a/releaseDev/security.json +++ /dev/null @@ -1,1949 +0,0 @@ -{ - "name": "security", - "description": "Controls that are used to assess security threats.", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "version": null, - "controls": [ - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "resource-policies", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if namespace has no resource policies defined", - "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# 
Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}" - } - ] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - 
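For reference, a minimal pod manifest that satisfies the resource-policies rule of C-0009 above could look as follows (an illustrative sketch; the name, image and values are placeholders, not part of the rule library). Both cpu and memory limits are present, so neither fixPath suggestion applies:

apiVersion: v1
kind: Pod
metadata:
  name: limited-pod            # placeholder name
spec:
  containers:
  - name: app
    image: nginx:1.25          # placeholder image
    resources:
      requests:                # requests are good practice; the rule only checks limits
        cpu: 250m
        memory: 128Mi
      limits:
        cpu: 500m              # the rule requires both cpu and memory limits per container
        memory: 256Mi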
"compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - } - ] - }, - { - "name": "Exposure to Internet", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Workload Exposure" - ] - }, - { - "attackTrack": "", - "categories": [ - "" - ] - } - ] - }, - "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
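A minimal sketch of the remediation suggested for C-0017 above (illustrative names and image; not part of the rule library): the container root filesystem is made read-only and a dedicated emptyDir is mounted for the one directory the application must write to:

apiVersion: v1
kind: Pod
metadata:
  name: readonly-fs-pod            # placeholder name
spec:
  containers:
  - name: app
    image: nginx:1.25              # placeholder image
    securityContext:
      readOnlyRootFilesystem: true # the field the immutable-container-filesystem rule inspects
    volumeMounts:
    - name: scratch
      mountPath: /tmp              # writable scratch space provided explicitly
  volumes:
  - name: scratch
    emptyDir: {}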
It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "exposure-to-internet", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "extensions", - "networking.k8s.io" - ], - "apiVersions": [ - "v1beta1", - "v1" - ], - "resources": [ - "Ingress" - ] - } - ], - "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected 
to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n" - } - ] - }, - { - "name": "Workload with credential access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "This control checks if workloads specifications have sensitive information in their environment variables.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", - "controlID": "C-0259", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
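As a worked example of what the exposure-to-internet rule of C-0256 above flags (an illustrative sketch; names and ports are placeholders): a NodePort Service whose selector matches a workload's pod labels marks that workload as exposed. Changing type to ClusterIP, and exposing it only through an authenticated ingress if needed, avoids the NodePort/LoadBalancer match:

apiVersion: v1
kind: Service
metadata:
  name: web-svc              # placeholder name
spec:
  type: NodePort             # is_exposed_service() matches NodePort and LoadBalancer
  selector:
    app: web                 # must equal the workload's pod labels for wl_connected_to_service()
  ports:
  - port: 80
    targetPort: 8080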
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value)\n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := 
data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}" - } - ] - }, - { - "name": "Workload with configMap access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Access" - ] - } - ] - }, - "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", - "controlID": "C-0258", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "workload-mounted-configmap", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts ConfigMaps", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", 
\"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Workload with PVC access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Access" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. 
Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "workload-mounted-pvc", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts PVC", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = 
[\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "Missing network policy", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Network" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure_network_policy_configured_in_labels", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ConfigMap" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "description": "fails if no networkpolicy configured in workload labels", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; networkpolicy.kind == \"NetworkPolicy\"]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if 
the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n count(networkpolicy.spec.podSelector) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "ServiceAccount token mounted", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", - "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. 
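A minimal NetworkPolicy sketch that would satisfy the ensure_network_policy_configured_in_labels rule of C-0260 above (illustrative names and labels; not part of the rule library). Its podSelector.matchLabels must equal labels on the workload's pod template, and it must live in the same namespace:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: web-allow-frontend      # placeholder name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: web                  # must match the workload's pod labels
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: frontend        # placeholder peer selector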
Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", - "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", - "controlID": "C-0261", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "serviceaccount-token-mount", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n beggining_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, beggining_of_path, [])\n\n wl_namespace := wl.metadata.namespace\n result := is_sa_auto_mounted(spec, beggining_of_path, wl_namespace)\n \n sa := input[_]\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata.namespace , wl_namespace)\n has_service_account_binding(sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"failedPaths\": failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = beggining_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n beggining_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"Pod\"\n beggining_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = beggining_of_path {\n workload.kind == \"CronJob\"\n beggining_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", beggining_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n 
paths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n" - } - ] - }, - { - "name": "Workload with secret access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Secret Access" - ] - } - ] - }, - "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. 
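A minimal sketch of the remediation for C-0261 above (placeholder names; not part of the rule library): automatic mounting of the ServiceAccount token is disabled at the workload level, which is exactly the fix path the serviceaccount-token-mount rule suggests:

apiVersion: v1
kind: Pod
metadata:
  name: no-sa-token-pod                    # placeholder name
spec:
  serviceAccountName: app-sa               # placeholder ServiceAccount
  automountServiceAccountToken: false      # the field the rule checks on the pod spec
  containers:
  - name: app
    image: registry.example.com/app:1.0    # placeholder image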
Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", - "controlID": "C-0255", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "workload-mounted-secrets", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Secret" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "description": "fails if workload mounts secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[_].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts\", [concat(\".\", containers_path), j])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", 
\"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}" - } - ] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "host-network-access", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if pod has hostNetwork enabled", - "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := 
\"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}" - } - ] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. 
The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "container-hostPort", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container has hostPort", - "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n" - } - ] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - 
"categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-rw-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, 
volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} " - } - ] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). 
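Where a hostPath volume is genuinely required, the alert-rw-hostpath rule of C-0045 above accepts a read-only mount (an illustrative sketch; names and paths are placeholders, not part of the rule library). Note that C-0048 (HostPath mount) would still report the hostPath volume itself:

apiVersion: v1
kind: Pod
metadata:
  name: host-logs-reader    # placeholder name
spec:
  containers:
  - name: app
    image: busybox:1.36     # placeholder image
    volumeMounts:
    - name: host-logs
      mountPath: /host/logs
      readOnly: true        # the field is_rw_mount() checks; true avoids the alert
  volumes:
  - name: host-logs
    hostPath:
      path: /var/log        # placeholder host path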
", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "insecure-capabilities", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." - } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren\u2019t necessary for the container.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | 
capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}" - } - ] - }, - { - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, 
i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}" - } - ] - }, - { - "name": "Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [ - { - "name": "rule-privilege-escalation", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation", - "mitreCode": "TA0004", - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following 
pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}" - }, - { - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if 
container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n" - }, - { - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := 
\"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, 
\"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n" - }, - { - "name": "drop-capability-netraw", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - 
"apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", 
path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n" - }, - { - "name": "set-seLinuxOptions", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if workload and container do not define any seLinuxOptions", - "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-seccomp-profile", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, 
path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}" - }, - { - "name": "set-procmount-default", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if container does not define securityContext.procMount to Default.", - "remediation": "Set securityContext.procMount to Default", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if procMount paramenter has the right value in containers\n pod := input[_]\n pod.kind = \"Pod\"\n\n\t# retrieve container list\n container := pod.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first 
if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # retrieve container list\n container := wl.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeny[msga] {\n # checks at first if we the procMountType feature gate is enabled on the api-server\n obj := input[_]\n is_control_plane_info(obj)\n is_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n # checks if we are managing the right workload kind\n cj := input[_]\n cj.kind = \"CronJob\"\n\n # retrieve container list\n container := cj.spec.jobTemplate.spec.template.spec.containers[i]\n container.securityContext.procMount != \"Default\"\n\n path := sprintf(\"containers[%d].securityContext.procMount\", [i])\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"ProcMountType=true\", args[i])\n}\n" - }, - { - "name": "set-fsgroup-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(pod.spec.securityContext)\n\n\n securityContextPath := \"spec.securityContext\"\n\n paths := get_paths(pod, securityContextPath)\n \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n securityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n paths := get_paths(cj, securityContextPath)\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n\n # check securityContext has fsGroup set properly\n not fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n path := \"spec.template.spec.securityContext\"\n paths := get_paths(wl, path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": paths[\"failedPaths\"],\n\t\t\"fixPaths\": paths[\"fixPaths\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) := true if {\n securityContext.fsGroup >= 0\n} else := false\n\n\nget_paths(resources, securityContextPath) := result { \n\n objectPath := array.concat(split(securityContextPath, \".\"), [\"fsGroup\"])\n object.get(resources, objectPath, false)\n\n result = {\"failedPaths\": [], \"fixPaths\": [{\"path\":sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]}\n} else = result {\n result = {\"failedPaths\": [securityContextPath], \"fixPaths\": []}\n}\n" - }, - { - "name": "set-fsgroupchangepolicy-value", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.fsGroup is not set.", - "remediation": "Set securityContext.fsGroup value", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n" - }, - { - "name": "set-systctls-params", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.systctls is not set.", - "remediation": "Set securityContext.systctls params", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has systctls set\n not pod.spec.securityContext.systctls\n\n path := \"spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.systctls'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.systctls is not 
set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has systctls set\n not wl.spec.template.spec.securityContext.systctls\n\n path := \"spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.systctls'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.systctls is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has systctls set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.systctls\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext.systctls\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.systctls'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": path, \"name\": \"net.ipv4.tcp_syncookie\", \"value\": \"1\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - }, - { - "name": "set-supplementalgroups-values", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "description": "Fails if securityContext.supplementalgroups is not set.", - "remediation": "Set securityContext.supplementalgroups values", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has supplementalGroups set\n not pod.spec.securityContext.supplementalGroups\n\n path := \"spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.supplementalGroups'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has supplementalGroups set\n not wl.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.supplementalGroups'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB 
###\n\n# Fails if securityContext.supplementalGroups is not set\ndeny[msga] {\n # verify the object kind\n\tcj := input[_]\n cj.kind == \"CronJob\"\n\n\t# check securityContext has supplementalGroups set\n not cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\n path := \"spec.jobTemplate.spec.template.spec.securityContext\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.supplementalGroups'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n" - } - ] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [ - { - "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [], - "ruleDependencies": [], - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", - "ruleQuery": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := 
sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n" - }, - { - "name": "anonymous-access-enabled", - "attributes": { - "armoBuiltin": true - }, - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "description": "Fails in case anonymous access is enabled on the cluster", - "remediation": "Disable anonymous access by passing the --anonymous-auth=false flag to the kube-apiserver component, or if it's a managed cluster, you can remove any RBAC rules which allow anonymous users to perform actions", - "ruleQuery": "armo_builtins", - "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n\n isAnonymous(rolebinding)\n\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:anonymous\"\n}\n\n\nisAnonymous(binding) {\n subject := binding.subjects[_]\n subject.name == \"system:unauthenticated\"\n}\n" - } - ] - } - ], - "ControlsIDs": [ - "C-0009", - "C-0017", - "C-0256", - "C-0259", - "C-0258", - "C-0257", - "C-0260", - "C-0261", - "C-0255", - "C-0041", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0211", - "C-0262" - ] -} \ No newline at end of file diff --git a/releaseDev/security_frameworks.json b/releaseDev/security_frameworks.json deleted file mode 100644 index d754710dd..000000000 --- a/releaseDev/security_frameworks.json +++ /dev/null @@ -1,520 +0,0 @@ -[ - { - "name": "security", - "description": "Controls that are used to assess security threats.", - "attributes": { - "armoBuiltin": true - }, - "typeTags": [ - "security" - ], - "version": null, - "controls": [ - { - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. 
It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "long_description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "test": " Check for each container if there is a \u2018limits\u2019 field defined for both cpu and memory", - "controlID": "C-0009", - "baseScore": 7.0, - "example": "@controls/examples/c009.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", - "long_description": "By default, containers are permitted mostly unrestricted execution within their own context. An attacker who has access to a container, can create files and download scripts as he wishes, and modify the underlying application running on the container. ", - "test": "Check whether the readOnlyRootFilesystem field in the SecurityContext is set to true. ", - "controlID": "C-0017", - "baseScore": 3.0, - "example": "@controls/examples/c017.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Exposure to Internet", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Workload Exposure" - ] - }, - { - "attackTrack": "", - "categories": [ - "" - ] - } - ] - }, - "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", - "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", - "test": "Checks if workloads are exposed through the use of NodePort, LoadBalancer or Ingress", - "controlID": "C-0256", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with credential access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "This control checks if workloads specifications have sensitive information in their environment variables.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "test": "Check if the workload has sensitive information in environment variables, by using list of known sensitive key names.", - "controlID": "C-0259", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with configMap access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Access" - ] - } - ] - }, - "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined", - "controlID": "C-0258", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with PVC access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Data Access" - ] - } - ] - }, - "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted PVCs by inspecting their specifications and verifying if PVC volumes are defined", - "controlID": "C-0257", - "baseScore": 4.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Missing network policy", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Network" - ] - } - ] - }, - "description": "This control detects workloads that has no NetworkPolicy configured in labels. 
If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", - "test": "Check that all workloads has a network policy configured in labels.", - "controlID": "C-0260", - "baseScore": 5.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "ServiceAccount token mounted", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Credential access" - ] - } - ] - }, - "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", - "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", - "test": "test if ServiceAccount token is mounted on workload and it has at least one binding.", - "controlID": "C-0261", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Workload with secret access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "workload-external-track", - "categories": [ - "Secret Access" - ] - } - ] - }, - "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", - "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", - "test": "Check if any workload has mounted secrets by inspecting their specifications and verifying if secret volumes are defined.", - "controlID": "C-0255", - "baseScore": 8.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those PODs that must have access to host network by design.", - "long_description": "We have it in ArmoBest", - "test": "", - "controlID": "C-0041", - "baseScore": 7.0, - "example": "@controls/examples/c041.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "long_description": "Workloads (like pod, deployment, etc) that contain a container with hostport. The problem that arises is that if the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. And any two workloads that specify the same HostPort cannot be deployed to the same node. In addition, if the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods \u2014 Kubernetes reschedules them to a different node if available.\u00a0", - "test": "Check for each workload (with container) if it exists inside the container hostPort.\u00a0\u00a0", - "controlID": "C-0044", - "baseScore": 4.0, - "example": "@controls/examples/c044.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "long_description": "hostPath volume mounts a directory or a file from the host to the container. Attackers who have permissions to create a new container in the cluster may create one with a writable hostPath volume and gain persistence on the underlying host. 
For example, the latter can be achieved by creating a cron job on the host.", - "test": "Checking in POD spec if there is a hostPath volume, if it has the section mount.readOnly == false (or doesn\u2019t exist) we raise an alert.", - "controlID": "C-0045", - "baseScore": 8.0, - "example": "@controls/examples/c045.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Insecure capabilities", - "attributes": { - "actionRequired": "configuration", - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "long_description": "Giving insecure and unnecessary capabilities for a container can increase the impact of a container compromise.", - "test": "Check capabilities given against a configurable blacklist of insecure capabilities (https://man7.org/linux/man-pages/man7/capabilities.7.html). ", - "controlID": "C-0046", - "baseScore": 7.0, - "example": "@controls/examples/c046.yaml", - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", - "example": "apiVersion: v1\nkind: Pod\nmetadata:\n name: test-pd\nspec:\n containers:\n - image: k8s.gcr.io/test-webserver\n name: test-container\n volumeMounts:\n - mountPath: /test-pd\n name: test-volume\n volumes:\n - name: test-volume\n hostPath: # This field triggers failure!\n path: /data\n type: Directory\n", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "controlID": "C-0048", - "baseScore": 7.0, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - }, - { - "name": "Apply Security Context to Your Pods and Containers", - "controlID": "C-0211", - "description": "Apply Security Context to Your Pods and Containers", - "long_description": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc..) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod level security context, and container level security context.", - "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. 
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", - "test": "Check that pod and container security context fields according to recommendations in CIS Security Benchmark for Docker Containers", - "manual_test": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", - "references": [ - "https://workbench.cisecurity.org/sections/1126667/recommendations/1838636" - ], - "attributes": { - "armoBuiltin": true - }, - "baseScore": 8, - "impact_statement": "If you incorrectly apply security contexts, you may have trouble running the pods.", - "default_value": "By default, no security contexts are automatically applied to pods.", - "scanningScope": { - "matches": [ - "cloud" - ] - }, - "rules": [] - }, - { - "controlID": "C-0262", - "name": "Anonymous access enabled", - "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", - "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", - "test": "Checks if ClusterRoleBinding/RoleBinding resources give permissions to anonymous user. Also checks in the apiserver if the --anonymous-auth flag is set to false", - "attributes": { - "armoBuiltin": true - }, - "baseScore": 5, - "scanningScope": { - "matches": [ - "cluster", - "file" - ] - }, - "rules": [] - } - ], - "ControlsIDs": [ - "C-0009", - "C-0017", - "C-0256", - "C-0259", - "C-0258", - "C-0257", - "C-0260", - "C-0261", - "C-0255", - "C-0041", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0211", - "C-0262" - ] - } -] \ No newline at end of file From 6fae3b4acb6ade1c2c0d715f96f53bbdeb5ee00c Mon Sep 17 00:00:00 2001 From: Raziel Cohen Date: Thu, 20 Jul 2023 13:47:25 +0300 Subject: [PATCH 8/8] revert changes Signed-off-by: Raziel Cohen --- gitregostore/datastructures.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gitregostore/datastructures.go b/gitregostore/datastructures.go index 6218ca55f..7cd65964c 100644 --- a/gitregostore/datastructures.go +++ b/gitregostore/datastructures.go @@ -99,7 +99,7 @@ func (gs *GitRegoStore) SetRegoObjects() error { // NewDefaultGitRegoStore - generates git store object for production regolibrary release files. // Release files source: "https://github.com/kubescape/regolibrary/releases/latest/download" func NewDefaultGitRegoStore(frequency int) *GitRegoStore { - gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releaseDev", "/", "scanning-scope", frequency) + gs := NewGitRegoStore("https://github.com", "kubescape", "regolibrary", "releases", "latest/download", "", frequency) return gs }
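
The final hunk above reverts NewDefaultGitRegoStore from the releaseDev tree back to the published release assets ("releases" / "latest/download"). As a quick sanity check for that revert, the sketch below shows one way the constructor and the SetRegoObjects loader (both of which appear in this diff) might be exercised. It assumes the file's package is gitregostore, as the directory name suggests; the test name, the frequency value of 60, and the skip-on-network-failure behaviour are illustrative choices, not part of the patch.

package gitregostore

import "testing"

// Minimal usage sketch for the reverted constructor: NewDefaultGitRegoStore now builds a
// GitRegoStore that reads the published regolibrary release files ("releases" /
// "latest/download") instead of the releaseDev tree. SetRegoObjects then fetches and
// parses those files, so it needs network access to github.com.
func TestDefaultGitRegoStoreSketch(t *testing.T) {
	gs := NewDefaultGitRegoStore(60) // 60 is an arbitrary polling frequency chosen for the sketch

	if err := gs.SetRegoObjects(); err != nil {
		// Offline environments cannot reach the release assets; skip rather than fail.
		t.Skipf("could not fetch rego objects from the latest release: %v", err)
	}
}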